diff mbox

[08/12] vrange: Add LRU handling for victim vrange

Message ID 1367605636-18284-9-git-send-email-john.stultz@linaro.org
State Superseded
Headers show

Commit Message

John Stultz May 3, 2013, 6:27 p.m. UTC
From: Minchan Kim <minchan@kernel.org>

This patch adds an LRU data structure for selecting a victim vrange
when memory pressure happens.

Basically, the VM will select an old vrange, but if the user has
recently tried to access a purged page, the vrange that includes the
page will be activated, because a page fault means one of two things:
either the user process will be killed, or it will recover from SIGBUS
and continue its work. For the latter case, we have to keep the vrange
out of victim selection.

I admit LRU might not be the best policy, but I can't think of a better
idea, so I wanted to keep it simple. I think user space can handle this
better with enough information, so I hope it is handled via the
mempressure notifier. Otherwise, if you have a better idea, it is
welcome!

Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 include/linux/vrange.h       |  3 +++
 include/linux/vrange_types.h |  1 +
 mm/memory.c                  |  1 +
 mm/vrange.c                  | 49 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 54 insertions(+)
diff mbox

Patch

diff --git a/include/linux/vrange.h b/include/linux/vrange.h
index 25bcd92..ff301b2 100644
--- a/include/linux/vrange.h
+++ b/include/linux/vrange.h
@@ -43,6 +43,9 @@  bool vrange_address(struct mm_struct *mm, unsigned long start,
 
 extern bool is_purged_vrange(struct mm_struct *mm, unsigned long address);
 
+unsigned int discard_vrange_pages(struct zone *zone, int nr_to_discard);
+void lru_move_vrange_to_head(struct mm_struct *mm, unsigned long address);
+
 #else
 
 static inline void vrange_init(void) {};
diff --git a/include/linux/vrange_types.h b/include/linux/vrange_types.h
index e46942c..d69b608 100644
--- a/include/linux/vrange_types.h
+++ b/include/linux/vrange_types.h
@@ -15,6 +15,7 @@  struct vrange {
 	struct interval_tree_node node;
 	struct vrange_root *owner;
 	bool purged;
+	struct list_head lru; /* protected by lru_lock */
 };
 #endif
 
diff --git a/mm/memory.c b/mm/memory.c
index 010fc42..b22fa63 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3719,6 +3719,7 @@  anon:
 
 		if (unlikely(pte_vrange(entry))) {
 			if (!is_purged_vrange(mm, address)) {
+				lru_move_vrange_to_head(mm, address);
 				/* zap pte */
 				ptl = pte_lockptr(mm, pmd);
 				spin_lock(ptl);
diff --git a/mm/vrange.c b/mm/vrange.c
index 1fce20e..8e66c41 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -15,8 +15,53 @@ 
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
 
+static LIST_HEAD(lru_vrange);
+static DEFINE_SPINLOCK(lru_lock);
+
 static struct kmem_cache *vrange_cachep;
 
+
+
+void lru_add_vrange(struct vrange *vrange)
+{
+	spin_lock(&lru_lock);
+	WARN_ON(!list_empty(&vrange->lru));
+	list_add(&vrange->lru, &lru_vrange);
+	spin_unlock(&lru_lock);
+}
+
+void lru_remove_vrange(struct vrange *vrange)
+{
+	spin_lock(&lru_lock);
+	if (!list_empty(&vrange->lru))
+		list_del_init(&vrange->lru);
+	spin_unlock(&lru_lock);
+}
+
+void lru_move_vrange_to_head(struct mm_struct *mm, unsigned long address)
+{
+	struct vrange_root *vroot = &mm->vroot;
+	struct interval_tree_node *node;
+	struct vrange *vrange;
+
+	vrange_lock(vroot);
+	node = interval_tree_iter_first(&vroot->v_rb, address,
+						address + PAGE_SIZE - 1);
+	if (node) {
+		vrange = container_of(node, struct vrange, node);
+		spin_lock(&lru_lock);
+		/*
+		 * Race happens with get_victim_vrange so in such case,
+		 * we can't move but it can put the vrange into head
+		 * after finishing purging work so no problem.
+		 */
+		if (!list_empty(&vrange->lru))
+			list_move(&vrange->lru, &lru_vrange);
+		spin_unlock(&lru_lock);
+	}
+	vrange_unlock(vroot);
+}
+
 void __init vrange_init(void)
 {
 	vrange_cachep = KMEM_CACHE(vrange, SLAB_PANIC);
@@ -28,24 +73,28 @@  static struct vrange *__vrange_alloc(void)
 	if (!vrange)
 		return vrange;
 	vrange->owner = NULL;
+	INIT_LIST_HEAD(&vrange->lru);
 	return vrange;
 }
 
 static void __vrange_free(struct vrange *range)
 {
 	WARN_ON(range->owner);
+	lru_remove_vrange(range);
 	kmem_cache_free(vrange_cachep, range);
 }
 
 static void __vrange_add(struct vrange *range, struct vrange_root *vroot)
 {
 	range->owner = vroot;
+	lru_add_vrange(range);
 	interval_tree_insert(&range->node, &vroot->v_rb);
 }
 
 static void __vrange_remove(struct vrange *range)
 {
 	interval_tree_remove(&range->node, &range->owner->v_rb);
+	lru_remove_vrange(range);
 	range->owner = NULL;
 }