@@ -21,6 +21,31 @@ static void mmc_hsq_retry_handler(struct work_struct *work)
mmc->ops->request(mmc, hsq->mrq);
}
+/*
+ * mmc_hsq_modify_threshold() - pick the hsq queue depth for the pending I/O.
+ * @hsq: MMC host software queue context
+ *
+ * Walk the slot array from tag 0 and count the leading run of queued
+ * requests whose total transfer size is exactly 4096 bytes and which
+ * are writes (the 4K random-write pattern named by the
+ * HSQ_PERFORMANCE_DEPTH comment in mmc_hsq.h). The scan stops at the
+ * first slot that is empty or does not match. If at least two such
+ * requests are queued back to back, raise mmc->hsq_depth to
+ * HSQ_PERFORMANCE_DEPTH; otherwise fall back to HSQ_NORMAL_DEPTH.
+ *
+ * NOTE(review): the scan always begins at tag 0 rather than at
+ * hsq->next_tag, so a matching run starting at a later tag is not
+ * detected -- confirm this is intentional.
+ */
+static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
+{
+ struct mmc_host *mmc = hsq->mmc;
+ struct mmc_request *mrq;
+ struct hsq_slot *slot;
+ int need_change = 0;
+ int tag;
+
+ for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
+ slot = &hsq->slot[tag];
+ mrq = slot->mrq;
+ /* 4KB write? (blksz * blocks is the request's total data size) */
+ if (mrq && mrq->data &&
+ (mrq->data->blksz * mrq->data->blocks == 4096) &&
+ (mrq->data->flags & MMC_DATA_WRITE))
+ need_change++;
+ else
+ break; /* an empty or non-matching slot ends the run */
+ }
+
+ /* Two or more consecutive 4K writes: use the deeper queue. */
+ if (need_change > 1)
+ mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
+ else
+ mmc->hsq_depth = HSQ_NORMAL_DEPTH;
+}
+
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
struct mmc_host *mmc = hsq->mmc;
@@ -42,6 +67,8 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
return;
}
+ mmc_hsq_modify_threshold(hsq);
+
slot = &hsq->slot[hsq->next_tag];
hsq->mrq = slot->mrq;
hsq->qcnt--;
@@ -10,6 +10,11 @@
* flight to avoid a long latency.
*/
#define HSQ_NORMAL_DEPTH 2
+/*
+ * For 4k random writes, we allow hsq_depth to increase to 5
+ * for better performance.
+ */
+#define HSQ_PERFORMANCE_DEPTH 5
struct hsq_slot {
struct mmc_request *mrq;
Increasing hsq_depth improves random write performance.

Signed-off-by: Wenchao Chen <wenchao.chen@unisoc.com>
---
 drivers/mmc/host/mmc_hsq.c | 27 +++++++++++++++++++++++++++
 drivers/mmc/host/mmc_hsq.h |  5 +++++
 2 files changed, 32 insertions(+)