@@ -441,30 +441,6 @@ check_suspended:
 }
 EXPORT_SYMBOL(md_handle_request);
 
-struct md_io {
-        struct mddev *mddev;
-        bio_end_io_t *orig_bi_end_io;
-        void *orig_bi_private;
-        struct block_device *orig_bi_bdev;
-        unsigned long start_time;
-};
-
-static void md_end_io(struct bio *bio)
-{
-        struct md_io *md_io = bio->bi_private;
-        struct mddev *mddev = md_io->mddev;
-
-        bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);
-
-        bio->bi_end_io = md_io->orig_bi_end_io;
-        bio->bi_private = md_io->orig_bi_private;
-
-        mempool_free(md_io, &mddev->md_io_pool);
-
-        if (bio->bi_end_io)
-                bio->bi_end_io(bio);
-}
-
 static blk_qc_t md_submit_bio(struct bio *bio)
 {
         const int rw = bio_data_dir(bio);
@@ -489,21 +465,6 @@ static blk_qc_t md_submit_bio(struct bio *bio)
                 return BLK_QC_T_NONE;
         }
 
-        if (bio->bi_end_io != md_end_io) {
-                struct md_io *md_io;
-
-                md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
-                md_io->mddev = mddev;
-                md_io->orig_bi_end_io = bio->bi_end_io;
-                md_io->orig_bi_private = bio->bi_private;
-                md_io->orig_bi_bdev = bio->bi_bdev;
-
-                bio->bi_end_io = md_end_io;
-                bio->bi_private = md_io;
-
-                md_io->start_time = bio_start_io_acct(bio);
-        }
-
         /* bio could be mergeable after passing to underlayer */
         bio->bi_opf &= ~REQ_NOMERGE;
 
@@ -5614,7 +5575,6 @@ static void md_free(struct kobject *ko)
 
         bioset_exit(&mddev->bio_set);
         bioset_exit(&mddev->sync_set);
-        mempool_exit(&mddev->md_io_pool);
         kfree(mddev);
 }
 
@@ -5710,11 +5670,6 @@ static int md_alloc(dev_t dev, char *name)
*/
mddev->hold_active = UNTIL_STOP;
- error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
- sizeof(struct md_io));
- if (error)
- goto abort;
-
error = -ENOMEM;
mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!mddev->queue)
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -487,7 +487,6 @@ struct mddev {
         struct bio_set                  sync_set; /* for sync operations like
                                                    * metadata and bitmap writes
                                                    */
-        mempool_t                       md_io_pool;
         /* Generic flush handling.
          * The last to finish preflush schedules a worker to submit