diff --git a/include/linux/vrange.h b/include/linux/vrange.h
--- a/include/linux/vrange.h
+++ b/include/linux/vrange.h
@@ -40,6 +40,15 @@ static inline struct mm_struct *vrange_get_owner_mm(struct vrange *vrange)
return container_of(vrange->owner, struct mm_struct, vroot);
}
+static inline
+struct address_space *vrange_get_owner_mapping(struct vrange *vrange)
+{
+ if (vrange_type(vrange) != VRANGE_FILE)
+ return NULL;
+ return container_of(vrange->owner, struct address_space, vroot);
+}
+
void vrange_init(void);
extern void vrange_root_cleanup(struct vrange_root *vroot);
extern int vrange_root_duplicate(struct vrange_root *orig,
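For readers following the owner lookups above: both possible owners embed the
same vrange_root (the vroot member of mm_struct and of address_space), so
container_of() can recover whichever structure encloses it, keyed off the
range's type. A compilable user-space sketch of that dual-embedding pattern;
all names below are illustrative stand-ins, not the kernel's:

	#include <assert.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	enum vroot_type { VROOT_MM, VROOT_FILE };

	struct vroot { enum vroot_type type; };	/* stand-in for vrange_root */
	struct mm_toy { int pid; struct vroot vroot; };
	struct mapping_toy { int ino; struct vroot vroot; };

	/* Recover the owning structure, as vrange_get_owner_mm() and
	 * vrange_get_owner_mapping() do above, keyed off the stored type. */
	static struct mm_toy *owner_mm(struct vroot *r)
	{
		if (r->type != VROOT_MM)
			return NULL;
		return container_of(r, struct mm_toy, vroot);
	}

	int main(void)
	{
		struct mm_toy mm = { .pid = 1, .vroot = { VROOT_MM } };
		struct mapping_toy as = { .ino = 2, .vroot = { VROOT_FILE } };

		assert(owner_mm(&mm.vroot)->pid == 1);
		assert(owner_mm(&as.vroot) == NULL);	/* wrong owner type */
		return 0;
	}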
diff --git a/mm/vrange.c b/mm/vrange.c
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -783,8 +783,9 @@ unsigned int discard_vma_pages(struct zone *zone, struct mm_struct *mm,
return ret;
}
-unsigned int discard_vrange(struct zone *zone, struct vrange *vrange,
- int nr_to_discard)
+static unsigned int discard_anon_vrange(struct zone *zone,
+ struct vrange *vrange,
+ int nr_to_discard)
{
struct mm_struct *mm;
unsigned long start = vrange->node.start;
@@ -825,46 +826,91 @@ out:
return nr_discarded;
}
+static unsigned int discard_file_vrange(struct zone *zone,
+ struct vrange *vrange,
+ int nr_to_discard)
+{
+ struct address_space *mapping;
+ unsigned long start = vrange->node.start;
+ unsigned long end = vrange->node.last;
+ unsigned long count = (end - start + 1) >> PAGE_CACHE_SHIFT;
+
+ mapping = vrange_get_owner_mapping(vrange);
+
+ truncate_inode_pages_range(mapping, start, end);
+ vrange->purged = true;
+
+ return count;
+}
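A quick sanity check of the page-count arithmetic above: with node.last an
inclusive byte offset, the +1 is what keeps a one-page range from counting as
zero pages. A standalone check, assuming 4KiB pages:

	#include <assert.h>

	#define PAGE_CACHE_SHIFT 12	/* assume 4KiB pages for this check */

	int main(void)
	{
		unsigned long start = 0, end = 8191;	/* two pages, inclusive */

		assert(((end - start + 1) >> PAGE_CACHE_SHIFT) == 2);
		assert(((end - start) >> PAGE_CACHE_SHIFT) == 1);	/* off by one */
		return 0;
	}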
+
+unsigned int discard_vrange(struct zone *zone, struct vrange *vrange,
+ int nr_to_discard)
+{
+ if (vrange_type(vrange) == VRANGE_MM)
+ return discard_anon_vrange(zone, vrange, nr_to_discard);
+ return discard_file_vrange(zone, vrange, nr_to_discard);
+}
+
+/*
+ * Take a vrange refcount and, depending on the type, a reference on
+ * the vrange->owner's mm or inode.
+ */
+static int hold_victim_vrange(struct vrange *vrange)
+{
+ if (vrange_type(vrange) == VRANGE_MM) {
+ struct mm_struct *mm = vrange_get_owner_mm(vrange);
+
+ /* the process is exiting, so skip it */
+ if (atomic_read(&mm->mm_users) == 0)
+ return -1;
+
+ /* the vrange is being freed, so skip it */
+ if (!atomic_inc_not_zero(&vrange->refcount))
+ return -1;
+ /*
+ * We take mmap_sem in a later routine, so we also need a
+ * reference on the mm.
+ * NOTE: mm_count is guaranteed to be non-zero here, because
+ * finding the vrange on the LRU list means we run before
+ * exit_vrange or remove_vrange.
+ */
+ atomic_inc(&mm->mm_count);
+ } else {
+ struct address_space *mapping;
+ mapping = vrange_get_owner_mapping(vrange);
+
+ if (!atomic_inc_not_zero(&vrange->refcount))
+ return -1;
+ __iget(mapping->host);
+ }
+
+ return 0;
+}
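hold_victim_vrange() leans on the inc-not-zero idiom: a new reference may only
be taken while at least one is still held, so a racing final put can never
resurrect a dying vrange. A user-space rendering of the idiom with C11 atomics
(illustrative only; the kernel's atomic_inc_not_zero() is the real primitive):

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	/* Succeed only while the object still has references, mirroring
	 * atomic_inc_not_zero(&vrange->refcount) above. */
	static bool inc_not_zero(atomic_int *refcount)
	{
		int old = atomic_load(refcount);

		while (old != 0)
			if (atomic_compare_exchange_weak(refcount, &old, old + 1))
				return true;
		return false;
	}

	int main(void)
	{
		atomic_int live = 1, dying = 0;

		assert(inc_not_zero(&live));	/* 1 -> 2: safe to use */
		assert(!inc_not_zero(&dying));	/* already dying: skip */
		return 0;
	}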
+
/*
- * Get next victim vrange from LRU and hold a vrange refcount
- * and vrange->mm's refcount.
+ * Get the next victim vrange from the LRU and take the needed refcounts.
*/
struct vrange *get_victim_vrange(void)
{
- struct mm_struct *mm;
struct vrange *vrange = NULL;
struct list_head *cur, *tmp;
spin_lock(&lru_lock);
list_for_each_prev_safe(cur, tmp, &lru_vrange) {
vrange = list_entry(cur, struct vrange, lru);
- mm = vrange_get_owner_mm(vrange);
- /* the process is exiting so pass it */
- if (atomic_read(&mm->mm_users) == 0) {
- list_del_init(&vrange->lru);
- vrange = NULL;
- continue;
- }
- /* vrange is freeing so continue to loop */
- if (!atomic_inc_not_zero(&vrange->refcount)) {
+ if (hold_victim_vrange(vrange)) {
list_del_init(&vrange->lru);
vrange = NULL;
continue;
}
- /*
- * we need to access mmap_sem further routine so
- * need to get a refcount of mm.
- * NOTE: We guarantee mm_count isn't zero in here because
- * if we found vrange from LRU list, it means we are
- * before exit_vrange or remove_vrange.
- */
- atomic_inc(&mm->mm_count);
-
/* Isolate vrange */
list_del_init(&vrange->lru);
break;
}
spin_unlock(&lru_lock);
@@ -873,9 +919,18 @@ struct vrange *get_victim_vrange(void)
void put_victim_range(struct vrange *vrange)
{
- struct mm_struct *mm = vrange_get_owner_mm(vrange);
- put_vrange(vrange);
- mmdrop(mm);
+ /* Look up the owner first: put_vrange() may free the vrange */
+ if (vrange_type(vrange) == VRANGE_MM) {
+ struct mm_struct *mm = vrange_get_owner_mm(vrange);
+
+ put_vrange(vrange);
+ mmdrop(mm);
+ } else {
+ struct address_space *mapping;
+
+ mapping = vrange_get_owner_mapping(vrange);
+ put_vrange(vrange);
+ iput(mapping->host);
+ }
}
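The ordering above matters: once put_vrange() drops the final reference the
vrange itself may be freed, so the owner must be looked up before the put; only
the separately held mm or inode reference outlives it. A toy model of that
capture-before-final-put rule, with made-up names:

	#include <assert.h>
	#include <stdlib.h>

	struct victim {
		int refcount;
		int owner_id;		/* stands in for vrange->owner */
	};

	/* Drop a reference, freeing the object on the final put. */
	static void put_ref(struct victim *v)
	{
		if (--v->refcount == 0)
			free(v);
	}

	int main(void)
	{
		struct victim *v = malloc(sizeof(*v));

		if (!v)
			return 1;
		v->refcount = 1;
		v->owner_id = 42;

		int owner = v->owner_id;	/* capture before the final put... */
		put_ref(v);			/* ...which may free v */
		assert(owner == 42);		/* captured value stays valid */
		return 0;
	}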
unsigned int discard_vrange_pages(struct zone *zone, int nr_to_discard)
@@ -884,11 +939,8 @@ unsigned int discard_vrange_pages(struct zone *zone, int nr_to_discard)
unsigned int nr_discarded = 0;
start_vrange = vrange = get_victim_vrange();
- if (start_vrange) {
- struct mm_struct *mm = vrange_get_owner_mm(start_vrange);
- atomic_inc(&start_vrange->refcount);
- atomic_inc(&mm->mm_count);
- }
+ if (start_vrange)
+ hold_victim_vrange(start_vrange);
while (vrange) {
nr_discarded += discard_vrange(zone, vrange, nr_to_discard);
Rework the victim range selection to also support file backed
volatile ranges.

Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 include/linux/vrange.h |   9 ++++
 mm/vrange.c            | 112 ++++++++++++++++++++++++++++++++++++-------------
 2 files changed, 91 insertions(+), 30 deletions(-)
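For orientation, the walk discard_vrange_pages() performs (the extra hold on
start_vrange acting as a sentinel) suggests a round-robin pass over the LRU
until the quota is met or the walk returns to its starting victim. A simplified
user-space model of that pattern; a reconstruction, not the literal loop, which
the hunk above truncates:

	#include <stdio.h>

	#define NR_VRANGES	4
	#define QUOTA		6

	/* pages each toy vrange can discard */
	static int pages[NR_VRANGES] = { 1, 3, 2, 5 };

	int main(void)
	{
		int discarded = 0;
		int start = 0, cur = 0;

		/* Walk victims round-robin until the quota is met or we
		 * are back at the first victim we isolated. */
		do {
			discarded += pages[cur];
			cur = (cur + 1) % NR_VRANGES;
		} while (discarded < QUOTA && cur != start);

		printf("discarded %d pages\n", discarded);	/* prints 6 */
		return 0;
	}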