@@ -660,21 +660,12 @@ static int ish_fw_xfer_direct_dma(struct
*/
payload_max_size &= ~(L1_CACHE_BYTES - 1);

- dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32);
+ dma_buf = dma_alloc_coherent(devc, payload_max_size, &dma_buf_phy, GFP_KERNEL);
if (!dma_buf) {
client_data->flag_retry = true;
return -ENOMEM;
}

- dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(devc, dma_buf_phy)) {
- dev_err(cl_data_to_dev(client_data), "DMA map failed\n");
- client_data->flag_retry = true;
- rv = -ENOMEM;
- goto end_err_dma_buf_release;
- }
-
ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA;
ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy;
@@ -694,14 +685,7 @@ static int ish_fw_xfer_direct_dma(struct
ldr_xfer_dma_frag.fragment.size = fragment_size;
memcpy(dma_buf, &fw->data[fragment_offset], fragment_size);

- dma_sync_single_for_device(devc, dma_buf_phy,
- payload_max_size,
- DMA_TO_DEVICE);
-
- /*
- * Flush cache here because the dma_sync_single_for_device()
- * does not do for x86.
- */
+ /* Flush cache to be sure the data is in main memory. */
clflush_cache_range(dma_buf, payload_max_size);

dev_dbg(cl_data_to_dev(client_data),
@@ -724,15 +708,8 @@ static int ish_fw_xfer_direct_dma(struct
fragment_offset += fragment_size;
}

- dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
- kfree(dma_buf);
- return 0;
-
end_err_resp_buf_release:
- /* Free ISH buffer if not done already, in error case */
- dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
-end_err_dma_buf_release:
- kfree(dma_buf);
+ dma_free_coherent(devc, payload_max_size, dma_buf, dma_buf_phy);
return rv;
}

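For reference, a minimal sketch of the pattern this patch adopts, assuming only a generic struct device pointer and buffer length (the loader_dma_alloc()/loader_dma_free() wrapper names are illustrative, not from the driver):

    #include <linux/dma-mapping.h>

    /*
     * Illustrative sketch: dma_alloc_coherent() returns the CPU virtual
     * address and fills in the device-visible DMA address, replacing the
     * kmalloc() + dma_map_single() pair and its mapping-error path. The
     * memory is coherent, so no dma_sync_single_for_device() is needed
     * between CPU writes and device reads.
     */
    static void *loader_dma_alloc(struct device *dev, size_t len,
                                  dma_addr_t *dma_handle)
    {
        return dma_alloc_coherent(dev, len, dma_handle, GFP_KERNEL);
    }

    /* Illustrative sketch: one call undoes both dma_unmap_single() and kfree(). */
    static void loader_dma_free(struct device *dev, size_t len, void *buf,
                                dma_addr_t dma_handle)
    {
        dma_free_coherent(dev, len, buf, dma_handle);
    }

Note that with the early return 0 dropped, the success path now falls through end_err_resp_buf_release: with rv == 0, so both outcomes release the buffer through the same dma_free_coherent() call.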