@@ -359,6 +359,7 @@ static const struct blk_mq_ops mmc_mq_ops = {
 static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
+	struct device *dev = mmc_dev(host);
 	unsigned block_size = 512;
 
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
@@ -366,13 +367,12 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
+	if (!dev->dma_mask || !*dev->dma_mask)
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
 	blk_queue_max_hw_sectors(mq->queue,
 		min(host->max_blk_count, host->max_req_size / 512));
 	if (host->can_dma_map_merge)
-		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
-							mmc_dev(host)),
+		WARN(!blk_queue_can_use_dma_map_merging(mq->queue, dev),
 		     "merging was advertised but not possible");
 	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
 
@@ -389,7 +389,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	blk_queue_max_segment_size(mq->queue,
 			round_down(host->max_seg_size, block_size));
 
-	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
+	if (!dev->dma_parms)
+		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+					      GFP_KERNEL);
+	dma_set_max_seg_size(dev, queue_max_segment_size(mq->queue));
 
 	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
 	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
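
For context on the dma_parms hunk above: dma_set_max_seg_size() only records the limit if the device already has a dma_parms structure, which is why the allocation is done before the call. A simplified sketch of the dma-mapping helper, paraphrased from include/linux/dma-mapping.h rather than quoted verbatim:

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	/* The limit can only be stored once dma_parms has been allocated. */
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

Without the devm_kzalloc() above, dev->dma_parms would stay NULL, the setter would return -EIO, and the DMA mapping layer would keep using its default segment size limit instead of the one derived from the queue.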