
[v2] bnx2fc: Remove dma_alloc_coherent to suppress the BUG_ON.

Message ID 20230824061838.13103-1-skashyap@marvell.com
State New
Series [v2] bnx2fc: Remove dma_alloc_coherent to suppress the BUG_ON.

Commit Message

Saurav Kashyap Aug. 24, 2023, 6:18 a.m. UTC
From: Jerry Snitselaar <jsnitsel@redhat.com>

dma_free_coherent() must not be called while holding a spin_lock_bh, as
the vunmap() it can trigger BUGs in that context. Convert the coherent
DMA allocations to kzalloc() plus dma_map_single(), and free them with
dma_unmap_single() plus kfree().
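
A minimal sketch of the resulting pattern, using a hypothetical helper
(alloc_and_map) and generic dev/size names; the driver itself open-codes
this for each of the SQ/CQ/RQ/XFERQ/CONFQ/conn_db/LCQ buffers:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Illustrative helper only, not part of the driver. */
static void *alloc_and_map(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* Plain kernel memory instead of a coherent DMA allocation. */
	void *buf = kzalloc(size, GFP_KERNEL);

	if (!buf)
		return NULL;

	/* Streaming mapping; must be checked with dma_mapping_error(). */
	*dma = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}

/*
 * Teardown pairs dma_unmap_single() with kfree(); unlike
 * dma_free_coherent(), this never reaches vunmap(), so it is safe
 * while holding spin_lock_bh().
 */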

[  449.843143] ------------[ cut here ]------------
[  449.848302] kernel BUG at mm/vmalloc.c:2727!
[  449.853072] invalid opcode: 0000 [#1] PREEMPT SMP PTI
[  449.858712] CPU: 5 PID: 1996 Comm: kworker/u24:2 Not tainted 5.14.0-118.el9.x86_64 #1
Rebooting.
[  449.867454] Hardware name: Dell Inc. PowerEdge R730/0WCJNT, BIOS 2.3.4 11/08/2016
[  449.876966] Workqueue: fc_rport_eq fc_rport_work [libfc]
[  449.882910] RIP: 0010:vunmap+0x2e/0x30
[  449.887098] Code: 00 65 8b 05 14 a2 f0 4a a9 00 ff ff 00 75 1b 55 48 89 fd e8 34 36 79 00 48 85 ed 74 0b 48 89 ef 31 f6 5d e9 14 fc ff ff 5d c3 <0f> 0b 0f 1f 44 00 00 41 57 41 56 49 89 ce 41 55 49 89 fd 41 54 41
[  449.908054] RSP: 0018:ffffb83d878b3d68 EFLAGS: 00010206
[  449.913887] RAX: 0000000080000201 RBX: ffff8f4355133550 RCX: 000000000d400005
[  449.921843] RDX: 0000000000000001 RSI: 0000000000001000 RDI: ffffb83da53f5000
[  449.929808] RBP: ffff8f4ac6675800 R08: ffffb83d878b3d30 R09: 00000000000efbdf
[  449.937774] R10: 0000000000000003 R11: ffff8f434573e000 R12: 0000000000001000
[  449.945736] R13: 0000000000001000 R14: ffffb83da53f5000 R15: ffff8f43d4ea3ae0
[  449.953701] FS:  0000000000000000(0000) GS:ffff8f529fc80000(0000) knlGS:0000000000000000
[  449.962732] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  449.969138] CR2: 00007f8cf993e150 CR3: 0000000efbe10003 CR4: 00000000003706e0
[  449.977102] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[  449.985065] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[  449.993028] Call Trace:
[  449.995756]  __iommu_dma_free+0x96/0x100
[  450.000139]  bnx2fc_free_session_resc+0x67/0x240 [bnx2fc]
[  450.006171]  bnx2fc_upload_session+0xce/0x100 [bnx2fc]
[  450.011910]  bnx2fc_rport_event_handler+0x9f/0x240 [bnx2fc]
[  450.018136]  fc_rport_work+0x103/0x5b0 [libfc]
[  450.023103]  process_one_work+0x1e8/0x3c0
[  450.027581]  worker_thread+0x50/0x3b0
[  450.031669]  ? rescuer_thread+0x370/0x370
[  450.036143]  kthread+0x149/0x170
[  450.039744]  ? set_kthread_struct+0x40/0x40
[  450.044411]  ret_from_fork+0x22/0x30
[  450.048404] Modules linked in: vfat msdos fat xfs nfs_layout_nfsv41_files rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver dm_service_time qedf qed crc8 bnx2fc libfcoe libfc scsi_transport_fc intel_rapl_msr intel_rapl_common x86_pkg_temp_thermal intel_powerclamp dcdbas rapl intel_cstate intel_uncore mei_me pcspkr mei ipmi_ssif lpc_ich ipmi_si fuse zram ext4 mbcache jbd2 loop nfsv3 nfs_acl nfs lockd grace fscache netfs irdma ice sd_mod t10_pi sg ib_uverbs ib_core 8021q garp mrp stp llc mgag200 i2c_algo_bit drm_kms_helper syscopyarea sysfillrect sysimgblt mxm_wmi fb_sys_fops cec crct10dif_pclmul ahci crc32_pclmul bnx2x drm ghash_clmulni_intel libahci rfkill i40e libata megaraid_sas mdio wmi sunrpc lrw dm_crypt dm_round_robin dm_multipath dm_snapshot dm_bufio dm_mirror dm_region_hash dm_log dm_zero dm_mod linear raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx raid6_pq libcrc32c crc32c_intel raid1 raid0 iscsi_ibft squashfs be2iscsi bnx2i cnic uio cxgb4i cxgb4 tls
[  450.048497]  libcxgbi libcxgb qla4xxx iscsi_boot_sysfs iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi edd ipmi_devintf ipmi_msghandler
[  450.159753] ---[ end trace 712de2c57c64abc8 ]---

Reported-by: Guangwu Zhang <guazhang@redhat.com>
Tested-by: Ravi Adabala <radabala@marvell.com>
Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
---
v1 -> v2
- Added patch description
- Corrected Jerry's name

 drivers/scsi/bnx2fc/bnx2fc_tgt.c | 228 ++++++++++++++++++++++---------
 1 file changed, 166 insertions(+), 62 deletions(-)

Comments

John Meneghini Oct. 18, 2023, 3 p.m. UTC | #1
Martin, Red Hat would like to get this merged.

Can we do that, or are there other issues that need to be addressed with this patch?

Reviewed-by: John Meneghini <jmeneghi@redhat.com>


Patch

diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 2c246e80c1c4..03628f7760e7 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -671,12 +671,18 @@  static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
-				     &tgt->sq_dma, GFP_KERNEL);
+	tgt->sq = kzalloc(tgt->sq_mem_size, GFP_KERNEL);
 	if (!tgt->sq) {
 		printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
 			tgt->sq_mem_size);
-		goto mem_alloc_failure;
+		goto sq_alloc_failure;
+	}
+
+	tgt->sq_dma = dma_map_single(&hba->pcidev->dev, tgt->sq,
+				     tgt->sq_mem_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->sq_dma))) {
+		pr_err(PFX "unable to map SQ memory %d\n", tgt->sq_mem_size);
+		goto sq_map_failure;
 	}
 
 	/* Allocate and map CQ */
@@ -684,12 +690,18 @@  static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
-				     &tgt->cq_dma, GFP_KERNEL);
+	tgt->cq = kzalloc(tgt->cq_mem_size, GFP_KERNEL);
 	if (!tgt->cq) {
-		printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
-			tgt->cq_mem_size);
-		goto mem_alloc_failure;
+		pr_err(PFX "unable to allocate CQ memory %d\n",
+		       tgt->cq_mem_size);
+		goto cq_alloc_failure;
+	}
+
+	tgt->cq_dma = dma_map_single(&hba->pcidev->dev, tgt->cq,
+				     tgt->cq_mem_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->cq_dma))) {
+		pr_err(PFX "unable to map CQ memory %d\n", tgt->cq_mem_size);
+		goto cq_map_failure;
 	}
 
 	/* Allocate and map RQ and RQ PBL */
@@ -697,24 +709,36 @@  static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
-				     &tgt->rq_dma, GFP_KERNEL);
+	tgt->rq = kzalloc(tgt->rq_mem_size, GFP_KERNEL);
 	if (!tgt->rq) {
-		printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
-			tgt->rq_mem_size);
-		goto mem_alloc_failure;
+		pr_err(PFX "unable to allocate RQ memory %d\n",
+		       tgt->rq_mem_size);
+		goto rq_alloc_failure;
+	}
+
+	tgt->rq_dma = dma_map_single(&hba->pcidev->dev, tgt->rq,
+				     tgt->rq_mem_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->rq_dma))) {
+		pr_err(PFX "unable to map RQ memory %d\n", tgt->rq_mem_size);
+		goto rq_map_failure;
 	}
 
 	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
-					 &tgt->rq_pbl_dma, GFP_KERNEL);
+	tgt->rq_pbl = kzalloc(tgt->rq_pbl_size, GFP_KERNEL);
 	if (!tgt->rq_pbl) {
-		printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
-			tgt->rq_pbl_size);
-		goto mem_alloc_failure;
+		pr_err(PFX "unable to allocate RQ PBL %d\n", tgt->rq_pbl_size);
+		goto rq_pbl_alloc_failure;
+	}
+
+	tgt->rq_pbl_dma = dma_map_single(&hba->pcidev->dev, tgt->rq_pbl,
+					 tgt->rq_pbl_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->rq_pbl_dma))) {
+		pr_err(PFX "unable to map RQ PBL memory %d\n",
+		       tgt->rq_pbl_size);
+		goto rq_pbl_map_failure;
 	}
 
 	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
@@ -734,13 +758,19 @@  static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			       CNIC_PAGE_MASK;
 
-	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
-					tgt->xferq_mem_size, &tgt->xferq_dma,
-					GFP_KERNEL);
+	tgt->xferq = kzalloc(tgt->xferq_mem_size, GFP_KERNEL);
 	if (!tgt->xferq) {
 		printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
 			tgt->xferq_mem_size);
-		goto mem_alloc_failure;
+		goto xferq_alloc_failure;
+	}
+
+	tgt->xferq_dma = dma_map_single(&hba->pcidev->dev, tgt->xferq,
+					tgt->xferq_mem_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->xferq_dma))) {
+		pr_err(PFX "unable to map XFERQ memory %d\n",
+		       tgt->xferq_mem_size);
+		goto xferq_map_failure;
 	}
 
 	/* Allocate and map CONFQ & CONFQ PBL */
@@ -748,13 +778,19 @@  static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			       CNIC_PAGE_MASK;
 
-	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
-					tgt->confq_mem_size, &tgt->confq_dma,
-					GFP_KERNEL);
+	tgt->confq = kzalloc(tgt->confq_mem_size, GFP_KERNEL);
 	if (!tgt->confq) {
-		printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
-			tgt->confq_mem_size);
-		goto mem_alloc_failure;
+		pr_err(PFX "unable to allocate CONFQ %d\n",
+		       tgt->confq_mem_size);
+		goto confq_alloc_failure;
+	}
+
+	tgt->confq_dma = dma_map_single(&hba->pcidev->dev, tgt->confq,
+					tgt->confq_mem_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->confq_dma))) {
+		pr_err(PFX "unable to map CONFQ memory %d\n",
+		       tgt->confq_mem_size);
+		goto confq_map_failure;
 	}
 
 	tgt->confq_pbl_size =
@@ -762,13 +798,19 @@  static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->confq_pbl_size =
 		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
-	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
-					    tgt->confq_pbl_size,
-					    &tgt->confq_pbl_dma, GFP_KERNEL);
+	tgt->confq_pbl = kzalloc(tgt->confq_pbl_size, GFP_KERNEL);
 	if (!tgt->confq_pbl) {
-		printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
-			tgt->confq_pbl_size);
-		goto mem_alloc_failure;
+		pr_err(PFX "unable to allocate CONFQ PBL %d\n",
+		       tgt->confq_pbl_size);
+		goto confq_pbl_alloc_failure;
+	}
+
+	tgt->confq_pbl_dma = dma_map_single(&hba->pcidev->dev, tgt->confq_pbl,
+					    tgt->confq_pbl_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->confq_pbl_dma))) {
+		pr_err(PFX "unable to map CONFQ PBL memory %d\n",
+		       tgt->confq_pbl_size);
+		goto confq_pbl_map_failure;
 	}
 
 	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
@@ -786,35 +828,88 @@  static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	/* Allocate and map ConnDB */
 	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
 
-	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
-					  tgt->conn_db_mem_size,
-					  &tgt->conn_db_dma, GFP_KERNEL);
+	tgt->conn_db = kzalloc(tgt->conn_db_mem_size, GFP_KERNEL);
 	if (!tgt->conn_db) {
 		printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
-						tgt->conn_db_mem_size);
-		goto mem_alloc_failure;
+		       tgt->conn_db_mem_size);
+		goto conn_db_alloc_failure;
 	}
 
+	tgt->conn_db_dma = dma_map_single(&hba->pcidev->dev, tgt->conn_db,
+					  tgt->conn_db_mem_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->conn_db_dma))) {
+		pr_err(PFX "unable to map conn db memory %d\n",
+		       tgt->conn_db_mem_size);
+		goto conn_db_map_failure;
+	}
 
 	/* Allocate and map LCQ */
 	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
 	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			     CNIC_PAGE_MASK;
 
-	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
-				      &tgt->lcq_dma, GFP_KERNEL);
-
+	tgt->lcq = kzalloc(tgt->lcq_mem_size, GFP_KERNEL);
 	if (!tgt->lcq) {
 		printk(KERN_ERR PFX "unable to allocate lcq %d\n",
 		       tgt->lcq_mem_size);
-		goto mem_alloc_failure;
+		goto lcq_alloc_failure;
+	}
+
+	tgt->lcq_dma = dma_map_single(&hba->pcidev->dev, tgt->lcq,
+				      tgt->lcq_mem_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&hba->pcidev->dev, tgt->lcq_dma))) {
+		pr_err(PFX "unable to map lcq memory %d\n",
+		       tgt->lcq_mem_size);
+		goto lcq_map_failure;
 	}
 
 	tgt->conn_db->rq_prod = 0x8000;
 
 	return 0;
 
-mem_alloc_failure:
+lcq_map_failure:
+	kfree(tgt->lcq);
+lcq_alloc_failure:
+	dma_unmap_single(&hba->pcidev->dev, tgt->conn_db_dma,
+			 tgt->conn_db_mem_size, DMA_BIDIRECTIONAL);
+conn_db_map_failure:
+	kfree(tgt->conn_db);
+conn_db_alloc_failure:
+	dma_unmap_single(&hba->pcidev->dev, tgt->confq_pbl_dma,
+			 tgt->confq_pbl_size, DMA_BIDIRECTIONAL);
+confq_pbl_map_failure:
+	kfree(tgt->confq_pbl);
+confq_pbl_alloc_failure:
+	dma_unmap_single(&hba->pcidev->dev, tgt->confq_dma,
+			 tgt->confq_mem_size, DMA_BIDIRECTIONAL);
+confq_map_failure:
+	kfree(tgt->confq);
+confq_alloc_failure:
+	dma_unmap_single(&hba->pcidev->dev, tgt->xferq_dma,
+			 tgt->xferq_mem_size, DMA_BIDIRECTIONAL);
+xferq_map_failure:
+	kfree(tgt->xferq);
+xferq_alloc_failure:
+	dma_unmap_single(&hba->pcidev->dev, tgt->rq_pbl_dma,
+			 tgt->rq_pbl_size, DMA_BIDIRECTIONAL);
+rq_pbl_map_failure:
+	kfree(tgt->rq_pbl);
+rq_pbl_alloc_failure:
+	dma_unmap_single(&hba->pcidev->dev, tgt->rq_dma, tgt->rq_mem_size,
+			 DMA_BIDIRECTIONAL);
+rq_map_failure:
+	kfree(tgt->rq);
+rq_alloc_failure:
+	dma_unmap_single(&hba->pcidev->dev, tgt->cq_dma, tgt->cq_mem_size,
+			 DMA_BIDIRECTIONAL);
+cq_map_failure:
+	kfree(tgt->cq);
+cq_alloc_failure:
+	dma_unmap_single(&hba->pcidev->dev, tgt->sq_dma, tgt->sq_mem_size,
+			 DMA_BIDIRECTIONAL);
+sq_map_failure:
+	kfree(tgt->sq);
+sq_alloc_failure:
 	return -ENOMEM;
 }
 
@@ -839,54 +934,63 @@  static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
 
 	/* Free LCQ */
 	if (tgt->lcq) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
-				    tgt->lcq, tgt->lcq_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->lcq_dma,
+				 tgt->lcq_mem_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->lcq);
 		tgt->lcq = NULL;
 	}
 	/* Free connDB */
 	if (tgt->conn_db) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
-				    tgt->conn_db, tgt->conn_db_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->conn_db_dma,
+				 tgt->conn_db_mem_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->conn_db);
 		tgt->conn_db = NULL;
 	}
 	/* Free confq  and confq pbl */
 	if (tgt->confq_pbl) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
-				    tgt->confq_pbl, tgt->confq_pbl_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->confq_pbl_dma,
+				 tgt->confq_pbl_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->confq_pbl);
 		tgt->confq_pbl = NULL;
 	}
 	if (tgt->confq) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
-				    tgt->confq, tgt->confq_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->confq_dma,
+				 tgt->confq_mem_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->confq);
 		tgt->confq = NULL;
 	}
 	/* Free XFERQ */
 	if (tgt->xferq) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
-				    tgt->xferq, tgt->xferq_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->xferq_dma,
+				 tgt->xferq_mem_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->xferq);
 		tgt->xferq = NULL;
 	}
 	/* Free RQ PBL and RQ */
 	if (tgt->rq_pbl) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
-				    tgt->rq_pbl, tgt->rq_pbl_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->rq_pbl_dma,
+				 tgt->rq_pbl_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->rq_pbl);
 		tgt->rq_pbl = NULL;
 	}
 	if (tgt->rq) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
-				    tgt->rq, tgt->rq_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->rq_dma,
+				 tgt->rq_mem_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->rq);
 		tgt->rq = NULL;
 	}
 	/* Free CQ */
 	if (tgt->cq) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
-				    tgt->cq, tgt->cq_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->cq_dma,
+				 tgt->cq_mem_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->cq);
 		tgt->cq = NULL;
 	}
 	/* Free SQ */
 	if (tgt->sq) {
-		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
-				    tgt->sq, tgt->sq_dma);
+		dma_unmap_single(&hba->pcidev->dev, tgt->sq_dma,
+				 tgt->sq_mem_size, DMA_BIDIRECTIONAL);
+		kfree(tgt->sq);
 		tgt->sq = NULL;
 	}
 	spin_unlock_bh(&tgt->cq_lock);