@@ -16,6 +16,8 @@
#include "ufshpb.h"
#include "../sd.h"
+#define ACTIVATION_THRESHOLD 8 /* subregion reads that trigger an activation trial */
+
/* memory management */
static struct kmem_cache *ufshpb_mctx_cache;
static mempool_t *ufshpb_mctx_pool;
@@ -26,6 +28,9 @@ static int tot_active_srgn_pages;
static struct workqueue_struct *ufshpb_wq;
+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
+ int srgn_idx);
+
bool ufshpb_is_allowed(struct ufs_hba *hba)
{
return !(hba->ufshpb_dev.hpb_disabled);
@@ -148,7 +153,7 @@ static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
int srgn_offset, int cnt, bool set_dirty)
{
struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
+ struct ufshpb_subregion *srgn, *prev_srgn = NULL;
int set_bit_len;
int bitmap_len;
unsigned long flags;
@@ -167,15 +172,39 @@ static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
else
set_bit_len = cnt;
- if (set_dirty)
- set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
-
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
if (set_dirty && rgn->rgn_state != HPB_RGN_INACTIVE &&
srgn->srgn_state == HPB_SRGN_VALID)
bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len);
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
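+	/*
+	 * In host control mode a write invalidates the subregion's read
+	 * stats, while reads accumulate toward an activation trial.
+	 */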
+ if (hpb->is_hcm && prev_srgn != srgn) {
+ bool activate = false;
+
+ spin_lock(&rgn->rgn_lock);
+ if (set_dirty) {
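+			/*
+			 * a write zeroes the subregion's read stats and
+			 * deducts them from the region total
+			 */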
+ rgn->reads -= srgn->reads;
+ srgn->reads = 0;
+ set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+ } else {
+ srgn->reads++;
+ rgn->reads++;
+ if (srgn->reads == ACTIVATION_THRESHOLD)
+ activate = true;
+ }
+ spin_unlock(&rgn->rgn_lock);
+
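+		/* mark the subregion for an activation trial */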
+ if (activate) {
+ spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+ ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
+ spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+ dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+ "activate region %d-%d\n", rgn_idx, srgn_idx);
+ }
+
+ prev_srgn = srgn;
+ }
+
srgn_offset = 0;
if (++srgn_idx == hpb->srgns_per_rgn) {
srgn_idx = 0;
@@ -602,6 +631,19 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
if (!ufshpb_is_supported_chunk(hpb, transfer_len))
return 0;
+ if (hpb->is_hcm) {
+		/*
+		 * In host control mode, reads are the main source of
+		 * activation trials.
+		 */
+ ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+ transfer_len, false);
+
+		/*
+		 * keep the read counters bounded: normalize once the region
+		 * total exceeds one subregion's worth of entries
+		 */
+ if (rgn->reads > hpb->entries_per_srgn)
+ schedule_work(&hpb->ufshpb_normalization_work);
+ }
+
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
transfer_len)) {
@@ -753,6 +795,8 @@ static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
if (list_empty(&srgn->list_act_srgn))
list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+
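+	/* count trials from both device recommendations and host decisions */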
+ hpb->stats.rb_active_cnt++;
}
static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
@@ -768,6 +812,8 @@ static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
if (list_empty(&rgn->list_inact_rgn))
list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
+
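+	/* count both device-recommended and host-initiated inactivations */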
+ hpb->stats.rb_inactive_cnt++;
}
static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
@@ -1088,6 +1134,7 @@ static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
rgn->rgn_idx);
goto out;
}
+
if (!list_empty(&rgn->list_lru_rgn)) {
if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
ret = -EBUSY;
@@ -1282,7 +1329,6 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
if (srgn->srgn_state == HPB_SRGN_VALID)
srgn->srgn_state = HPB_SRGN_INVALID;
spin_unlock(&hpb->rgn_state_lock);
- hpb->stats.rb_active_cnt++;
}
if (hpb->is_hcm) {
@@ -1314,7 +1360,6 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
}
spin_unlock(&hpb->rgn_state_lock);
- hpb->stats.rb_inactive_cnt++;
}
out:
@@ -1513,6 +1558,36 @@ static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
}
+static void ufshpb_normalization_work_handler(struct work_struct *work)
+{
+ struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+ ufshpb_normalization_work);
+ int rgn_idx;
+
+ for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+ struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
+ int srgn_idx;
+
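+		/*
+		 * decay the read counters: halve each subregion's reads and
+		 * recompute the region total under rgn_lock
+		 */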
+ spin_lock(&rgn->rgn_lock);
+ rgn->reads = 0;
+ for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
+ struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
+
+ srgn->reads >>= 1;
+ rgn->reads += srgn->reads;
+ }
+ spin_unlock(&rgn->rgn_lock);
+
+ if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
+ continue;
+
+		/* the region is active but has no reads - inactivate it */
+ spin_lock(&hpb->rsp_list_lock);
+ ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+ spin_unlock(&hpb->rsp_list_lock);
+ }
+}
+
static void ufshpb_map_work_handler(struct work_struct *work)
{
struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
@@ -1671,6 +1746,8 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
rgn = rgn_table + rgn_idx;
rgn->rgn_idx = rgn_idx;
+ spin_lock_init(&rgn->rgn_lock);
+
INIT_LIST_HEAD(&rgn->list_inact_rgn);
INIT_LIST_HEAD(&rgn->list_lru_rgn);
@@ -1910,6 +1987,9 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
INIT_LIST_HEAD(&hpb->list_hpb_lu);
INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
+ if (hpb->is_hcm)
+ INIT_WORK(&hpb->ufshpb_normalization_work,
+ ufshpb_normalization_work_handler);
hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
sizeof(struct ufshpb_req), 0, 0, NULL);
@@ -2009,6 +2089,8 @@ static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
{
+ if (hpb->is_hcm)
+ cancel_work_sync(&hpb->ufshpb_normalization_work);
cancel_work_sync(&hpb->map_work);
}
@@ -106,6 +106,10 @@ struct ufshpb_subregion {
int rgn_idx;
int srgn_idx;
bool is_last;
+
+	/* subregion reads - for host control mode */
+ unsigned int reads;
+
/* below information is used by rsp_list */
struct list_head list_act_srgn;
};
@@ -123,6 +127,10 @@ struct ufshpb_region {
struct list_head list_lru_rgn;
unsigned long rgn_flags;
#define RGN_FLAG_DIRTY 0
+
+	/* region reads and their protecting lock - for host control mode */
+ spinlock_t rgn_lock;
+ unsigned int reads;
};
#define for_each_sub_region(rgn, i, srgn) \
@@ -212,6 +220,7 @@ struct ufshpb_lu {
/* for selecting victim */
struct victim_select_info lru_info;
+ struct work_struct ufshpb_normalization_work;
/* pinned region information */
u32 lu_pinned_start;