@@ -144,13 +144,14 @@ static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
return true;
}
-static void ufshpb_set_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx, int srgn_offset, int cnt)
+static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
+ int srgn_offset, int cnt, bool set_dirty)
{
struct ufshpb_region *rgn;
struct ufshpb_subregion *srgn;
int set_bit_len;
int bitmap_len;
+ unsigned long flags;
next_srgn:
rgn = hpb->rgn_tbl + rgn_idx;
@@ -166,11 +167,14 @@ static void ufshpb_set_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
else
set_bit_len = cnt;
- set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+ if (set_dirty)
+ set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
- if (rgn->rgn_state != HPB_RGN_INACTIVE &&
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+ if (set_dirty && rgn->rgn_state != HPB_RGN_INACTIVE &&
srgn->srgn_state == HPB_SRGN_VALID)
bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len);
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
srgn_offset = 0;
if (++srgn_idx == hpb->srgns_per_rgn) {
@@ -592,10 +596,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	/* If command type is WRITE or DISCARD, set bitmap as dirty */
if (ufshpb_is_write_or_discard_cmd(cmd)) {
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+ ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len, true);
return 0;
}
Given a transfer length, set_dirty meticulously runs over all the
entries, across subregions and regions if needed. Currently its only
use is to mark dirty blocks, but soon HCM may profit from it as well,
when managing its read counters.

Signed-off-by: Avri Altman <avri.altman@wdc.com>
---
 drivers/scsi/ufs/ufshpb.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
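For reference, below is a minimal user-space sketch (not kernel code) of the
walk that the renamed ufshpb_iterate_rgn() performs: each step is clamped to
the end of the current subregion, then the walk rolls over into the next
subregion and, when those are exhausted, into the next region, until the whole
transfer length is consumed. The geometry constants are made up for
illustration; the kernel version additionally sizes the last subregion via
bitmap_len and updates the per-subregion dirty bitmap under rgn_state_lock, as
the hunks above show.

/* Illustrative sketch only -- geometry values are not HPB's. */
#include <stdio.h>

#define ENTRIES_PER_SRGN	64	/* assumed, for illustration */
#define SRGNS_PER_RGN		4	/* assumed, for illustration */

static void iterate_rgn(int rgn_idx, int srgn_idx, int srgn_offset, int cnt)
{
	int set_bit_len;

next_srgn:
	/* Clamp this step so it never crosses a subregion boundary. */
	if (srgn_offset + cnt > ENTRIES_PER_SRGN)
		set_bit_len = ENTRIES_PER_SRGN - srgn_offset;
	else
		set_bit_len = cnt;

	printf("rgn %d srgn %d: entries [%d, %d)\n",
	       rgn_idx, srgn_idx, srgn_offset, srgn_offset + set_bit_len);

	/* The next step starts at the beginning of the following subregion. */
	srgn_offset = 0;
	if (++srgn_idx == SRGNS_PER_RGN) {
		srgn_idx = 0;
		rgn_idx++;
	}

	cnt -= set_bit_len;
	if (cnt > 0)
		goto next_srgn;
}

int main(void)
{
	/* A 300-entry transfer starting 40 entries into subregion 2. */
	iterate_rgn(0, 2, 40, 300);
	return 0;
}

With set_dirty factored out as a flag, a later HCM caller could reuse the same
walk without touching the dirty bitmap, which is what the commit message hints
at for the read counters.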