diff --git a/include/linux/vrange.h b/include/linux/vrange.h
new file mode 100644
--- /dev/null
+++ b/include/linux/vrange.h
@@ -0,0 +1,45 @@
+#ifndef _LINUX_VRANGE_H
+#define _LINUX_VRANGE_H
+
+#include <linux/vrange_types.h>
+#include <linux/mm.h>
+
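+/*
+ * Map the rb_node embedded in a vrange's interval_tree_node back to
+ * the enclosing struct vrange.
+ */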
+#define vrange_entry(ptr) \
+ container_of(ptr, struct vrange, node.rb)
+
+#ifdef CONFIG_MMU
+
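+/* Set up an empty range tree; @type says whether it backs an mm or a file. */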
+static inline void vrange_root_init(struct vrange_root *vroot, int type)
+{
+ vroot->type = type;
+ vroot->v_rb = RB_ROOT;
+ mutex_init(&vroot->v_lock);
+}
+
+static inline void vrange_lock(struct vrange_root *vroot)
+{
+ mutex_lock(&vroot->v_lock);
+}
+
+static inline void vrange_unlock(struct vrange_root *vroot)
+{
+ mutex_unlock(&vroot->v_lock);
+}
+
+static inline int vrange_type(struct vrange *vrange)
+{
+ return vrange->owner->type;
+}
+
+extern void vrange_init(void);
+extern void vrange_root_cleanup(struct vrange_root *vroot);
+
+#else
+
+static inline void vrange_init(void) {}
+static inline void vrange_root_init(struct vrange_root *vroot, int type) {}
+static inline void vrange_root_cleanup(struct vrange_root *vroot) {}
+
+#endif /* CONFIG_MMU */
+#endif /* _LINUX_VRANGE_H */
diff --git a/include/linux/vrange_types.h b/include/linux/vrange_types.h
new file mode 100644
--- /dev/null
+++ b/include/linux/vrange_types.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_VRANGE_TYPES_H
+#define _LINUX_VRANGE_TYPES_H
+
+#include <linux/mutex.h>
+#include <linux/interval_tree.h>
+
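+/*
+ * Root of one interval tree of volatile ranges, kept either per mm
+ * (VRANGE_MM) or per file (VRANGE_FILE); v_lock serializes all
+ * updates to the tree.
+ */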
+struct vrange_root {
+ struct rb_root v_rb; /* vrange rb tree */
+ struct mutex v_lock; /* Protect v_rb */
+ enum {VRANGE_MM, VRANGE_FILE} type; /* range root type */
+};
+
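+/* One volatile range, linked into its owner's interval tree */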
+struct vrange {
+ struct interval_tree_node node;
+ struct vrange_root *owner;
+ bool purged;
+};
+#endif /* _LINUX_VRANGE_TYPES_H */
diff --git a/init/main.c b/init/main.c
--- a/init/main.c
+++ b/init/main.c
@@ -72,6 +72,7 @@
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
+#include <linux/vrange.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -605,6 +606,7 @@ asmlinkage void __init start_kernel(void)
calibrate_delay();
pidmap_init();
anon_vma_init();
+ vrange_init();
#ifdef CONFIG_X86
if (efi_enabled(EFI_RUNTIME_SERVICES))
efi_enter_virtual_mode();
diff --git a/mm/Makefile b/mm/Makefile
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,7 +5,7 @@
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
- vmalloc.o pagewalk.o pgtable-generic.o
+ vmalloc.o pagewalk.o pgtable-generic.o vrange.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
mmu-$(CONFIG_MMU) += process_vm_access.o
diff --git a/mm/vrange.c b/mm/vrange.c
new file mode 100644
--- /dev/null
+++ b/mm/vrange.c
@@ -0,0 +1,165 @@
+/*
+ * mm/vrange.c
+ */
+
+#include <linux/vrange.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *vrange_cachep;
+
+void __init vrange_init(void)
+{
+ vrange_cachep = KMEM_CACHE(vrange, SLAB_PANIC);
+}
+
+static struct vrange *__vrange_alloc(void)
+{
+ struct vrange *vrange = kmem_cache_alloc(vrange_cachep, GFP_KERNEL);
+	if (!vrange)
+		return NULL;
+ vrange->owner = NULL;
+ return vrange;
+}
+
+static void __vrange_free(struct vrange *range)
+{
+ WARN_ON(range->owner);
+ kmem_cache_free(vrange_cachep, range);
+}
+
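+/* The tree helpers below expect the caller to hold vroot->v_lock. */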
+static void __vrange_add(struct vrange *range, struct vrange_root *vroot)
+{
+ range->owner = vroot;
+ interval_tree_insert(&range->node, &vroot->v_rb);
+}
+
+static void __vrange_remove(struct vrange *range)
+{
+ interval_tree_remove(&range->node, &range->owner->v_rb);
+ range->owner = NULL;
+}
+
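+/*
+ * Interval tree endpoints are inclusive: [start_idx, end_idx] marks
+ * end_idx itself as part of the range.
+ */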
+static inline void __vrange_set(struct vrange *range,
+ unsigned long start_idx, unsigned long end_idx,
+ bool purged)
+{
+ range->node.start = start_idx;
+ range->node.last = end_idx;
+ range->purged = purged;
+}
+
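+/*
+ * The endpoints of an inserted node must not be changed in place, so
+ * resizing removes the node, rewrites it and reinserts it.
+ */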
+static inline void __vrange_resize(struct vrange *range,
+ unsigned long start, unsigned long end)
+{
+ struct vrange_root *vroot = range->owner;
+ bool purged = range->purged;
+
+ __vrange_remove(range);
+ __vrange_set(range, start, end, purged);
+ __vrange_add(range, vroot);
+}
+
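+/*
+ * Mark [start, end] volatile, merging any overlapping ranges already
+ * in the tree and inheriting their purged state.  Returns 0 on
+ * success or -ENOMEM if no node could be allocated.
+ */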
+static int vrange_add(struct vrange_root *vroot,
+ unsigned long start, unsigned long end)
+{
+ struct vrange *new_range, *range;
+ struct interval_tree_node *node, *next;
+ int purged = 0;
+
+ new_range = __vrange_alloc();
+ if (!new_range)
+ return -ENOMEM;
+
+ vrange_lock(vroot);
+ node = interval_tree_iter_first(&vroot->v_rb, start, end);
+ while (node) {
+ next = interval_tree_iter_next(node, start, end);
+
+ range = container_of(node, struct vrange, node);
+		/* An existing range already covers the new one entirely */
+		if (node->start <= start && node->last >= end) {
+			__vrange_free(new_range);
+			goto out;
+		}
+
+ start = min_t(unsigned long, start, node->start);
+ end = max_t(unsigned long, end, node->last);
+
+ purged |= range->purged;
+ __vrange_remove(range);
+ __vrange_free(range);
+
+ node = next;
+ }
+ __vrange_set(new_range, start, end, purged);
+ __vrange_add(new_range, vroot);
+out:
+ vrange_unlock(vroot);
+ return 0;
+}
+
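+/*
+ * Clear the volatile state of [start, end]: fully covered ranges are
+ * dropped, partially covered ones are trimmed or split.  *purged
+ * reports whether any affected range had already been purged.
+ */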
+static int vrange_remove(struct vrange_root *vroot,
+ unsigned long start, unsigned long end,
+ int *purged)
+{
+ struct vrange *new_range, *range;
+ struct interval_tree_node *node, *next;
+ bool used_new = false;
+
+ if (!purged)
+ return -EINVAL;
+ *purged = 0;
+
+ new_range = __vrange_alloc();
+ if (!new_range)
+ return -ENOMEM;
+
+ vrange_lock(vroot);
+ node = interval_tree_iter_first(&vroot->v_rb, start, end);
+ while (node) {
+ next = interval_tree_iter_next(node, start, end);
+
+ range = container_of(node, struct vrange, node);
+ *purged |= range->purged;
+
+		if (start <= node->start && end >= node->last) {
+			/* The removed span covers this range fully */
+			__vrange_remove(range);
+			__vrange_free(range);
+		} else if (node->start >= start) {
+			/* Overlap at the front: keep the tail */
+			__vrange_resize(range, end + 1, node->last);
+		} else if (node->last <= end) {
+			/* Overlap at the tail: keep the front */
+			__vrange_resize(range, node->start, start - 1);
+		} else {
+			/* Removed span is in the middle: split the range */
+			used_new = true;
+			__vrange_set(new_range, end + 1, node->last,
+					range->purged);
+			__vrange_resize(range, node->start, start - 1);
+			__vrange_add(new_range, vroot);
+			break;
+		}
+
+ node = next;
+ }
+ vrange_unlock(vroot);
+
+ if (!used_new)
+ __vrange_free(new_range);
+
+ return 0;
+}
+
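+/*
+ * Free every range on the tree, e.g. when the owning mm or mapping
+ * goes away.
+ */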
+void vrange_root_cleanup(struct vrange_root *vroot)
+{
+ struct vrange *range;
+ struct rb_node *next;
+
+ vrange_lock(vroot);
+ next = rb_first(&vroot->v_rb);
+ while (next) {
+ range = vrange_entry(next);
+ next = rb_next(next);
+ __vrange_remove(range);
+ __vrange_free(range);
+ }
+ vrange_unlock(vroot);
+}
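+
+/*
+ * Illustrative usage sketch only: the caller and the start/end values
+ * below are hypothetical, not part of this patch.
+ *
+ *	struct vrange_root vroot;
+ *	int purged;
+ *
+ *	vrange_root_init(&vroot, VRANGE_MM);
+ *	vrange_add(&vroot, start, end);
+ *	vrange_remove(&vroot, start, end, &purged);
+ *	vrange_root_cleanup(&vroot);
+ */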