@@ -1297,22 +1297,30 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
}

static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles)
+ u32 cycles_in_1us)
{
- int err;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
u32 core_clk_ctrl_reg;
+ int ret;

- if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
- return -EINVAL;
-
- err = ufshcd_dme_get(hba,
+ ret = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
- if (err)
- return err;
+ if (ret)
+ return ret;

- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
- core_clk_ctrl_reg |= clk_cycles;
+ /* Bit mask is different for UFS host controller V4.0.0 onwards */
+ if (host->hw_ver.major >= 4) {
+ if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us))
+ return -ERANGE;
+ core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4;
+ core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
+ } else {
+ if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us))
+ return -ERANGE;
+ core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK;
+ core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us);
+ }

/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
@@ -129,8 +129,9 @@ enum {
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
-#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
-#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF
+#define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16)
+#define CLK_1US_CYCLES_MASK GENMASK(7, 0)
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)

static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
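
For reference, a standalone userspace sketch of what the FIELD_FIT()/FIELD_PREP() calls in the first hunk compute. The kernel versions come from <linux/bitfield.h>; the field_fit()/field_prep() helpers, the local GENMASK() macro, and the cycles_in_1us = 300 input below are illustrative stand-ins, not part of the patch. With 300 cycles per microsecond (a 300 MHz core clock), the value fits the 12-bit V4 field but overflows the legacy 8-bit field, which is why the -ERANGE check is version-dependent:

/* Illustration only, not kernel code; compile with gcc/clang. */
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's GENMASK(): set bits h..l inclusive. */
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define CLK_1US_CYCLES_MASK_V4	GENMASK(27, 16)	/* 12-bit field */
#define CLK_1US_CYCLES_MASK	GENMASK(7, 0)	/* 8-bit field */

/* Simplified FIELD_FIT(): value must not overflow the field. */
static int field_fit(uint32_t mask, uint32_t val)
{
	int shift = __builtin_ctz(mask);	/* GCC/Clang builtin */

	return (val & ~(mask >> shift)) == 0;
}

/* Simplified FIELD_PREP(): shift value into the field position. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t reg = 0;
	uint32_t cycles_in_1us = 300;	/* made-up 300 MHz example */

	/* V4 path: 300 fits in 12 bits and lands in bits [27:16] */
	if (field_fit(CLK_1US_CYCLES_MASK_V4, cycles_in_1us)) {
		reg &= ~CLK_1US_CYCLES_MASK_V4;
		reg |= field_prep(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
	}
	printf("reg = 0x%08x\n", reg);	/* prints reg = 0x012c0000 */

	/* pre-V4 path: 300 > 255, so the 8-bit field would -ERANGE */
	printf("fits in 8-bit mask: %d\n",
	       field_fit(CLK_1US_CYCLES_MASK, cycles_in_1us));	/* 0 */
	return 0;
}

The wider GENMASK(27, 16) field raises the representable limit from 255 to 4095 cycles per microsecond, i.e. core clocks up to roughly 4 GHz instead of 255 MHz on pre-V4 controllers.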