@@ -18,6 +18,9 @@
#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
#define EVICTION_THRESHOLD (ACTIVATION_THRESHOLD << 5) /* 256 IOs */
+#define READ_TO_MS 1000
+#define READ_TO_EXPIRIES 100
+#define POLLING_INTERVAL_MS 200
/* memory management */
static struct kmem_cache *ufshpb_mctx_cache;
@@ -1021,12 +1024,63 @@ static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
return 0;
}
+static void ufshpb_read_to_handler(struct work_struct *work)
+{
+ struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+ ufshpb_read_to_work.work);
+ struct victim_select_info *lru_info = &hpb->lru_info;
+ struct ufshpb_region *rgn, *next_rgn;
+ unsigned long flags;
+ LIST_HEAD(expired_list);
+
+ if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
+ return;
+
+ spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+ list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
+ list_lru_rgn) {
+ bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
+
+ if (timedout) {
+ rgn->read_timeout_expiries--;
+ if (is_rgn_dirty(rgn) ||
+ rgn->read_timeout_expiries == 0)
+ list_add(&rgn->list_expired_rgn, &expired_list);
+ else
+ rgn->read_timeout = ktime_add_ms(ktime_get(),
+ READ_TO_MS);
+ }
+ }
+
+ spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+ list_for_each_entry_safe(rgn, next_rgn, &expired_list,
+ list_expired_rgn) {
+ list_del_init(&rgn->list_expired_rgn);
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ }
+
+ ufshpb_kick_map_work(hpb);
+
+ clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
+
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(POLLING_INTERVAL_MS));
+}
+
static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
struct ufshpb_region *rgn)
{
rgn->rgn_state = HPB_RGN_ACTIVE;
list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
atomic_inc(&lru_info->active_cnt);
+ if (rgn->hpb->is_hcm) {
+ rgn->read_timeout = ktime_add_ms(ktime_get(), READ_TO_MS);
+ rgn->read_timeout_expiries = READ_TO_EXPIRIES;
+ }
}
static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
@@ -1803,6 +1857,7 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
INIT_LIST_HEAD(&rgn->list_inact_rgn);
INIT_LIST_HEAD(&rgn->list_lru_rgn);
+ INIT_LIST_HEAD(&rgn->list_expired_rgn);
if (rgn_idx == hpb->rgns_per_lu - 1) {
srgn_cnt = ((hpb->srgns_per_lu - 1) %
@@ -1824,6 +1879,7 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
}
rgn->rgn_flags = 0;
+ rgn->hpb = hpb;
}
return 0;
@@ -2047,9 +2103,12 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
INIT_LIST_HEAD(&hpb->list_hpb_lu);
INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
- if (hpb->is_hcm)
+ if (hpb->is_hcm) {
INIT_WORK(&hpb->ufshpb_normalization_work,
ufshpb_normalization_work_handler);
+ INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
+ ufshpb_read_to_handler);
+ }
hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
sizeof(struct ufshpb_req), 0, 0, NULL);
@@ -2083,6 +2142,10 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
ufshpb_stat_init(hpb);
ufshpb_param_init(hpb);
+ if (hpb->is_hcm)
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(POLLING_INTERVAL_MS));
+
return 0;
release_pre_req_mempool:
@@ -2149,9 +2212,10 @@ static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
{
- if (hpb->is_hcm)
+ if (hpb->is_hcm) {
+ cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
cancel_work_sync(&hpb->ufshpb_normalization_work);
-
+ }
cancel_work_sync(&hpb->map_work);
}
@@ -2259,6 +2323,10 @@ void ufshpb_resume(struct ufs_hba *hba)
continue;
ufshpb_set_state(hpb, HPB_PRESENT);
ufshpb_kick_map_work(hpb);
+ if (hpb->is_hcm)
+ schedule_delayed_work(&hpb->ufshpb_read_to_work,
+ msecs_to_jiffies(POLLING_INTERVAL_MS));
+
}
}
@@ -111,6 +111,7 @@ struct ufshpb_subregion {
};
struct ufshpb_region {
+ struct ufshpb_lu *hpb;
struct ufshpb_subregion *srgn_tbl;
enum HPB_RGN_STATE rgn_state;
int rgn_idx;
@@ -128,6 +129,10 @@ struct ufshpb_region {
/* region reads - for host mode */
spinlock_t rgn_lock;
unsigned int reads;
+ /* region "cold" timer - for host mode */
+ ktime_t read_timeout;
+ unsigned int read_timeout_expiries;
+ struct list_head list_expired_rgn;
};
#define for_each_sub_region(rgn, i, srgn) \
@@ -219,6 +224,9 @@ struct ufshpb_lu {
/* for selecting victim */
struct victim_select_info lru_info;
struct work_struct ufshpb_normalization_work;
+ struct delayed_work ufshpb_read_to_work;
+ unsigned long work_data_bits;
+#define TIMEOUT_WORK_RUNNING 0
/* pinned region information */
u32 lu_pinned_start;
In order not to hang on to "cold" regions, we shall inactivate a region
that has no READ access for a predefined amount of time - READ_TO_MS.
For that purpose we shall monitor the active regions list, polling it
every POLLING_INTERVAL_MS. On timeout expiry we shall add the region to
the "to-be-inactivated" list, unless it is clean and has not yet
exhausted its READ_TO_EXPIRIES - another parameter.

All this does not apply to pinned regions.

Signed-off-by: Avri Altman <avri.altman@wdc.com>
---
 drivers/scsi/ufs/ufshpb.c | 74 +++++++++++++++++++++++++++++++++++++--
 drivers/scsi/ufs/ufshpb.h |  8 +++++
 2 files changed, 79 insertions(+), 3 deletions(-)
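For reviewers who want to exercise the expiry policy in isolation, below is
a minimal userspace sketch of the decision logic in ufshpb_read_to_handler().
It is illustrative only and not part of the patch: struct region,
region_expired() and the millisecond clock are hypothetical stand-ins for
struct ufshpb_region, the LRU-list walk and ktime; only the READ_TO_MS,
READ_TO_EXPIRIES and 200 ms polling values are taken from the patch.

/*
 * Illustrative userspace model of the expiry policy (not kernel code):
 * a timed-out region is retired immediately if it is dirty; a clean
 * region gets its deadline re-armed until READ_TO_EXPIRIES re-arms
 * have been consumed.
 */
#include <stdbool.h>
#include <stdio.h>

#define READ_TO_MS		1000
#define READ_TO_EXPIRIES	100

struct region {
	long long read_timeout;			/* absolute deadline, in ms */
	unsigned int read_timeout_expiries;	/* re-arms left */
	bool dirty;
};

/* Return true when the region should move to the "to-be-inactivated" list. */
static bool region_expired(struct region *rgn, long long now_ms)
{
	if (now_ms <= rgn->read_timeout)
		return false;			/* deadline not reached yet */

	rgn->read_timeout_expiries--;
	if (rgn->dirty || rgn->read_timeout_expiries == 0)
		return true;

	/* clean, with re-arms left: push the deadline out again */
	rgn->read_timeout = now_ms + READ_TO_MS;
	return false;
}

int main(void)
{
	struct region rgn = {
		.read_timeout = READ_TO_MS,
		.read_timeout_expiries = READ_TO_EXPIRIES,
		.dirty = false,
	};
	long long now;

	/* a clean region only re-arms, no matter how long we poll */
	for (now = 0; now <= 5 * READ_TO_MS; now += 200)
		if (region_expired(&rgn, now))
			printf("expired at %lld ms\n", now);

	/* once dirty, the next elapsed deadline retires it */
	rgn.dirty = true;
	for (; ; now += 200)
		if (region_expired(&rgn, now)) {
			printf("expired at %lld ms (dirty)\n", now);
			break;
		}
	return 0;
}

Note how a clean region is only ever re-armed: dirtiness or running out of
re-arms is what moves it onto the expired list, mirroring the
list_add(&rgn->list_expired_rgn, &expired_list) branch in the hunk above.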