@@ -771,13 +771,12 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
iscsi_set_param(cls_conn, param, buf, buflen);
break;
case ISCSI_PARAM_DATADGST_EN:
- iscsi_set_param(cls_conn, param, buf, buflen);
-
mutex_lock(&tcp_sw_conn->sock_lock);
if (!tcp_sw_conn->sock) {
mutex_unlock(&tcp_sw_conn->sock_lock);
return -ENOTCONN;
}
+ iscsi_set_param(cls_conn, param, buf, buflen);
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
mutex_unlock(&tcp_sw_conn->sock_lock);
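Note: the hunk above commits the data-digest parameter only after the socket has been verified, under sock_lock, to still exist, so the setting can no longer change on an unbound connection. Below is a minimal user-space sketch of that check-then-commit ordering; it is not the kernel code, and every name in it (tcp_conn, conn_set_datadgst, the string stand-ins for the sendpage callbacks) is invented for illustration. Build with `cc -pthread`.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct tcp_conn {
	pthread_mutex_t sock_lock;
	void *sock;              /* NULL once the connection is unbound */
	bool datadgst_en;        /* the parameter being set */
	const char *sendpage;    /* stands in for the sendpage callback */
};

static int conn_set_datadgst(struct tcp_conn *c, bool enable)
{
	pthread_mutex_lock(&c->sock_lock);
	if (!c->sock) {
		/* Bail out before touching any state a caller could observe. */
		pthread_mutex_unlock(&c->sock_lock);
		return -ENOTCONN;
	}
	c->datadgst_en = enable;                 /* commit only after the check */
	c->sendpage = enable ? "no_sendpage" : "sock_sendpage";
	pthread_mutex_unlock(&c->sock_lock);
	return 0;
}

int main(void)
{
	struct tcp_conn c = { .sock = NULL };

	pthread_mutex_init(&c.sock_lock, NULL);
	printf("unbound: %d\n", conn_set_datadgst(&c, true));  /* -ENOTCONN */
	c.sock = &c;                             /* pretend a socket is bound */
	printf("bound:   %d\n", conn_set_datadgst(&c, true));  /* 0 */
	pthread_mutex_destroy(&c.sock_lock);
	return 0;
}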
@@ -2526,7 +2526,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
mrioc->unrecoverable = 1;
goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
- return;
+ goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
break;
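Note: switching the SOFT_RESET_IN_PROGRESS case from `return` to `goto schedule_work` keeps the watchdog alive. The label name suggests the tail of the handler re-queues the delayed work, so a bare return would silently end fault polling while a reset is still running. A small user-space sketch of that shape, with invented names (watchdog_poll, schedule_next_poll, the fault codes):

#include <stdio.h>

enum fault_code {
	FAULT_NONE,
	FAULT_POWER_CYCLE_REQUIRED,
	FAULT_SOFT_RESET_IN_PROGRESS,
};

static int rearm_count;

static void schedule_next_poll(void)
{
	rearm_count++;   /* stands in for re-queuing the delayed work item */
}

static void watchdog_poll(enum fault_code code)
{
	switch (code) {
	case FAULT_POWER_CYCLE_REQUIRED:
		printf("controller marked unrecoverable\n");
		goto schedule_work;
	case FAULT_SOFT_RESET_IN_PROGRESS:
		/*
		 * A reset is already in progress, so there is nothing to do
		 * right now, but the poll must still be re-armed; returning
		 * here would end fault monitoring for good.
		 */
		goto schedule_work;
	default:
		break;
	}
	printf("normal fault handling would run here\n");
schedule_work:
	schedule_next_poll();
}

int main(void)
{
	watchdog_poll(FAULT_SOFT_RESET_IN_PROGRESS);
	watchdog_poll(FAULT_NONE);
	printf("re-armed %d times\n", rearm_count);
	return 0;
}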
@@ -3617,6 +3617,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
probe_failed:
qla_enode_stop(base_vha);
qla_edb_stop(base_vha);
+ vfree(base_vha->scan.l);
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
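Note: the added vfree() plugs a leak on the probe error path: scan.l is evidently a vmalloc'ed buffer (hence vfree rather than kfree), and vfree(NULL) is a no-op, so the call needs no guard even when the allocation never happened. A user-space sketch of the same unwind rule, with invented names (struct adapter, probe_one):

#include <stdio.h>
#include <stdlib.h>

struct adapter {
	char *scan_list;   /* plays the role of base_vha->scan.l */
	char *gnl_buf;     /* plays the role of base_vha->gnl.l  */
};

static int probe_one(struct adapter *a, int fail_late)
{
	a->scan_list = calloc(1, 4096);
	if (!a->scan_list)
		return -1;

	a->gnl_buf = calloc(1, 1024);
	if (!a->gnl_buf)
		goto probe_failed;

	if (fail_late)             /* simulate a later init step failing */
		goto probe_failed;

	return 0;                  /* buffers are kept for the device's lifetime */

probe_failed:
	free(a->scan_list);        /* the counterpart of the added vfree() */
	free(a->gnl_buf);          /* free(NULL), like vfree(NULL), is a no-op */
	a->scan_list = NULL;
	a->gnl_buf = NULL;
	return -1;
}

int main(void)
{
	struct adapter a = { 0 };

	printf("probe: %d\n", probe_one(&a, 1));
	return 0;
}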
@@ -1409,13 +1409,6 @@ static int ufshcd_devfreq_target(struct device *dev,
struct ufs_clk_info *clki;
unsigned long irq_flags;
 
- /*
- * Skip devfreq if UFS initialization is not finished.
- * Otherwise ufs could be in a inconsistent state.
- */
- if (!smp_load_acquire(&hba->logical_unit_scan_finished))
- return 0;
-
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
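Note: this hunk, together with the ufshcd_async_scan hunk further down, removes a release/acquire handshake: the scan path published "initialization finished" with smp_store_release() and the devfreq callback consumed it with smp_load_acquire(). The pattern itself is shown below as a user-space C11 sketch (invented names: init_path, callback_path, init_finished); the diff can drop it because devfreq is now set up only after the device has been detected, so the callback has nothing left to guard against.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int shared_config;           /* stands in for the initialized device state */
static atomic_bool init_finished;   /* the flag this series removes */

static void init_path(void)
{
	shared_config = 42;         /* all initialization steps happen first ... */
	/* ... and become visible before the flag can be observed as true. */
	atomic_store_explicit(&init_finished, true, memory_order_release);
}

static int callback_path(void)
{
	/* Pairs with the release store: seeing true implies seeing the config. */
	if (!atomic_load_explicit(&init_finished, memory_order_acquire))
		return 0;           /* init not finished yet, skip the work */
	return shared_config;
}

int main(void)
{
	printf("before init: %d\n", callback_path());
	init_path();
	printf("after init:  %d\n", callback_path());
	return 0;
}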
@@ -8399,6 +8392,22 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
 
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ hba->clk_scaling.is_allowed = true;
+
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+
+ hba->clk_scaling.is_enabled = true;
+ ufshcd_init_clk_scaling_sysfs(hba);
+ }
+
ufs_bsg_probe(hba);
ufshpb_init(hba);
scsi_scan_host(hba->host);
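Note: the block added above moves clock-scaling/devfreq setup to the point where the device has already been detected and its power parameters are known; that ordering is what replaces the flag-based guard removed elsewhere in this patch. A compact user-space sketch of the "register the callback only after detection" ordering, with invented names (the toy struct hba, detect_device, add_lus, clk_scaling_target):

#include <stdio.h>
#include <string.h>

struct pwr_info { int gear; };

struct hba {
	struct pwr_info pwr_info;            /* filled in by device detection */
	struct pwr_info saved_pwr_info;      /* snapshot used by clock scaling */
	int (*devfreq_target)(struct hba *); /* installed once scaling is ready */
};

static int clk_scaling_target(struct hba *h)
{
	/* Safe: registration happened after detection, so the snapshot is valid. */
	return h->saved_pwr_info.gear;
}

static void detect_device(struct hba *h)
{
	h->pwr_info.gear = 4;                /* negotiated with the device */
}

static void add_lus(struct hba *h)
{
	/* Same order as the hunk: snapshot the power info, then enable scaling. */
	memcpy(&h->saved_pwr_info, &h->pwr_info, sizeof(h->saved_pwr_info));
	h->devfreq_target = clk_scaling_target;
}

int main(void)
{
	struct hba h = { 0 };

	detect_device(&h);
	add_lus(&h);
	printf("gear seen by devfreq: %d\n", h.devfreq_target(&h));
	return 0;
}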
@@ -8670,12 +8679,6 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
if (ret) {
pm_runtime_put_sync(hba->dev);
ufshcd_hba_exit(hba);
- } else {
- /*
- * Make sure that when reader code sees UFS initialization has finished,
- * all initialization steps have really been executed.
- */
- smp_store_release(&hba->logical_unit_scan_finished, true);
}
}
@@ -10316,30 +10319,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
ufshcd_set_ufs_dev_active(hba);
 
- /* Initialize devfreq */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- hba->clk_scaling.is_allowed = true;
-
- err = ufshcd_devfreq_init(hba);
- if (err)
- goto rpm_put_sync;
-
- hba->clk_scaling.is_enabled = true;
- ufshcd_init_clk_scaling_sysfs(hba);
- }
-
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
 
device_enable_async_suspend(dev);
return 0;
 
-rpm_put_sync:
- pm_runtime_put_sync(dev);
free_tmf_queue:
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
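Note: with the devfreq block gone from ufshcd_init(), nothing jumps to rpm_put_sync: any more, so the label and its pm_runtime_put_sync() are removed along with their only user; keeping them would leave dead, unreferenced code behind. A user-space sketch of the goto-unwind ladder involved, with invented names:

#include <stdio.h>
#include <stdlib.h>

static int init_controller(int fail_step)
{
	char *tmf_queue, *cmd_queue;

	tmf_queue = (fail_step == 1) ? NULL : malloc(64);
	if (!tmf_queue)
		goto out_error;

	cmd_queue = (fail_step == 2) ? NULL : malloc(64);
	if (!cmd_queue)
		goto free_tmf_queue;   /* unwind only what was set up before this point */

	/* A real probe keeps these for the device's lifetime; freed here
	 * only so the sketch itself does not leak. */
	free(cmd_queue);
	free(tmf_queue);
	return 0;

	/*
	 * A label like rpm_put_sync: used to sit in this ladder; once its
	 * last "goto" is deleted, the label and its cleanup go with it.
	 */
free_tmf_queue:
	free(tmf_queue);
out_error:
	return -1;
}

int main(void)
{
	printf("ok:    %d\n", init_controller(0));
	printf("fail2: %d\n", init_controller(2));
	return 0;
}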
@@ -979,7 +979,6 @@ struct ufs_hba {
struct completion *uic_async_done;
 
enum ufshcd_state ufshcd_state;
- bool logical_unit_scan_finished;
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;