@@ -43,6 +43,9 @@ bool vrange_address(struct mm_struct *mm, unsigned long start,
extern bool is_purged_vrange(struct mm_struct *mm, unsigned long address);
+unsigned int discard_vrange_pages(struct zone *zone, int nr_to_discard);
+void lru_move_vrange_to_head(struct mm_struct *mm, unsigned long address);
+
#else
static inline void vrange_init(void) {};
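The two declarations added here split the new functionality between reclaim and the fault path: discard_vrange_pages() is the entry point a memory-pressure path would call to drop up to nr_to_discard volatile pages from a zone, and lru_move_vrange_to_head() is the fault-side hook used in memory.c below. For orientation only, a reclaim-side caller might look roughly like the following sketch; the function name and the SWAP_CLUSTER_MAX batch size are assumptions, not part of this patch:

/* Sketch only: ask the vrange LRU for pages before falling back to reclaim. */
static unsigned long reclaim_volatile_first(struct zone *zone,
					    unsigned long nr_to_reclaim)
{
	unsigned long discarded = 0;

	while (discarded < nr_to_reclaim) {
		unsigned int nr = discard_vrange_pages(zone, SWAP_CLUSTER_MAX);

		if (!nr)	/* no more victim vranges for this zone */
			break;
		discarded += nr;
	}
	return discarded;
}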
@@ -15,6 +15,7 @@ struct vrange {
struct interval_tree_node node;
struct vrange_root *owner;
bool purged;
+ struct list_head lru; /* protected by lru_lock */
};
#endif
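The new lru member threads every vrange onto a single global LRU list (lru_vrange, introduced in mm/vrange.c below), and the comment records the locking rule: the field is only touched under lru_lock. A hypothetical debug helper, not part of the patch and only buildable inside mm/vrange.c where the list and lock live, illustrates that rule:

/* Hypothetical: count how many vranges are currently queued on the LRU. */
static unsigned int vrange_lru_count(void)
{
	struct vrange *vrange;
	unsigned int nr = 0;

	spin_lock(&lru_lock);
	list_for_each_entry(vrange, &lru_vrange, lru)
		nr++;
	spin_unlock(&lru_lock);

	return nr;
}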
@@ -3719,6 +3719,7 @@ anon:
if (unlikely(pte_vrange(entry))) {
if (!is_purged_vrange(mm, address)) {
+ lru_move_vrange_to_head(mm, address);
/* zap pte */
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
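In the fault path, touching a vrange-marked pte whose contents have not been purged promotes the owning vrange to the head of the LRU before the marker pte is zapped and the fault is retried against a fresh page, so actively used ranges stay away from the purge end of the list. The purged branch is outside this hunk; the sketch below fills it in with a SIGBUS outcome, which is an assumption about the rest of the series rather than something shown here:

if (unlikely(pte_vrange(entry))) {
	if (!is_purged_vrange(mm, address)) {
		/* data intact: treat the access as a touch, keep the range hot */
		lru_move_vrange_to_head(mm, address);
		/* ... zap the marker pte under pte_lockptr(), then refault ... */
	} else {
		/* contents were discarded; assumed to be reported as an error */
		return VM_FAULT_SIGBUS;
	}
}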
@@ -15,8 +15,53 @@
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
+static LIST_HEAD(lru_vrange);
+static DEFINE_SPINLOCK(lru_lock);
+
static struct kmem_cache *vrange_cachep;
+
+void lru_add_vrange(struct vrange *vrange)
+{
+ spin_lock(&lru_lock);
+ WARN_ON(!list_empty(&vrange->lru));
+ list_add(&vrange->lru, &lru_vrange);
+ spin_unlock(&lru_lock);
+}
+
+void lru_remove_vrange(struct vrange *vrange)
+{
+ spin_lock(&lru_lock);
+ if (!list_empty(&vrange->lru))
+ list_del_init(&vrange->lru);
+ spin_unlock(&lru_lock);
+}
+
+void lru_move_vrange_to_head(struct mm_struct *mm, unsigned long address)
+{
+ struct vrange_root *vroot = &mm->vroot;
+ struct interval_tree_node *node;
+ struct vrange *vrange;
+
+ vrange_lock(vroot);
+ node = interval_tree_iter_first(&vroot->v_rb, address,
+ address + PAGE_SIZE - 1);
+ if (node) {
+ vrange = container_of(node, struct vrange, node);
+ spin_lock(&lru_lock);
+ /*
+ * We can race with get_victim_vrange(): if it already took this
+ * vrange off the LRU, we cannot move it here, but it puts the
+ * vrange back at the head once purging finishes, so nothing is lost.
+ */
+ if (!list_empty(&vrange->lru))
+ list_move(&vrange->lru, &lru_vrange);
+ spin_unlock(&lru_lock);
+ }
+ vrange_unlock(vroot);
+}
+
void __init vrange_init(void)
{
vrange_cachep = KMEM_CACHE(vrange, SLAB_PANIC);
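The purge side referred to by the comment in lru_move_vrange_to_head() (get_victim_vrange()) is not part of these hunks. Assuming it takes the coldest vrange from the tail of lru_vrange under lru_lock and re-queues it with lru_add_vrange() once purging completes, a minimal sketch of that consumer could be:

/* Sketch of a purge-side consumer: detach the coldest vrange from the LRU. */
static struct vrange *vrange_lru_pop_coldest(void)
{
	struct vrange *vrange = NULL;

	spin_lock(&lru_lock);
	if (!list_empty(&lru_vrange)) {
		/* list_add() queues hot vranges at the head, so the tail is coldest */
		vrange = list_entry(lru_vrange.prev, struct vrange, lru);
		list_del_init(&vrange->lru);
	}
	spin_unlock(&lru_lock);

	return vrange;	/* caller purges it, then re-adds it with lru_add_vrange() */
}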
@@ -28,24 +73,28 @@ static struct vrange *__vrange_alloc(void)
if (!vrange)
return vrange;
vrange->owner = NULL;
+ INIT_LIST_HEAD(&vrange->lru);
return vrange;
}
static void __vrange_free(struct vrange *range)
{
WARN_ON(range->owner);
+ lru_remove_vrange(range);
kmem_cache_free(vrange_cachep, range);
}
static void __vrange_add(struct vrange *range, struct vrange_root *vroot)
{
range->owner = vroot;
+ lru_add_vrange(range);
interval_tree_insert(&range->node, &vroot->v_rb);
}
static void __vrange_remove(struct vrange *range)
{
interval_tree_remove(&range->node, &range->owner->v_rb);
+ lru_remove_vrange(range);
range->owner = NULL;
}