Message ID | 20230903151328.2981432-3-joel@joelfernandes.org |
---|---|
State | Accepted |
Commit | b1e5a3dee255a11cbdd5a0e814829276bd33a793 |
Series | [v6,1/7] mm/mremap: Optimize the start addresses in move_page_tables() |
On Sun, Sep 03, 2023 at 03:13:23PM +0000, Joel Fernandes (Google) wrote:
> For the stack move happening in shift_arg_pages(), the move is happening
> within the same VMA which spans the old and new ranges.
>
> In case the aligned address happens to fall within that VMA, allow such
> moves and don't abort the mremap alignment optimization.
>
> In the regular non-stack mremap case, we cannot allow any such moves as
> will end up destroying some part of the mapping (either the source of
> the move, or part of the existing mapping). So just avoid it for stack
> moves.
>
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> ---
>  fs/exec.c          |  2 +-
>  include/linux/mm.h |  2 +-
>  mm/mremap.c        | 33 +++++++++++++++++++--------------
>  3 files changed, 21 insertions(+), 16 deletions(-)

[...]

Looks good to me, thanks

Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
On Sun 03-09-23 15:13:23, Joel Fernandes wrote:
> For the stack move happening in shift_arg_pages(), the move is happening
> within the same VMA which spans the old and new ranges.
>
> In case the aligned address happens to fall within that VMA, allow such
> moves and don't abort the mremap alignment optimization.
>
> In the regular non-stack mremap case, we cannot allow any such moves as
> will end up destroying some part of the mapping (either the source of
> the move, or part of the existing mapping). So just avoid it for stack
> moves.
>
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>

LGTM

Acked-by: Michal Hocko <mhocko@suse.com>

Thanks!

[...]
diff --git a/fs/exec.c b/fs/exec.c
index 1a827d55ba94..244925307958 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -712,7 +712,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	 * process cleanup to remove whatever mess we made.
 	 */
 	if (length != move_page_tables(vma, old_start,
-				       vma, new_start, length, false))
+				       vma, new_start, length, false, true))
 		return -ENOMEM;
 
 	lru_add_drain();
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 406ab9ea818f..e635d1fc73b6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2458,7 +2458,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len,
-		bool need_rmap_locks);
+		bool need_rmap_locks, bool for_stack);
 
 /*
  * Flags used by change_protection(). For now we make it a bitmap so
diff --git a/mm/mremap.c b/mm/mremap.c
index 1011326b7b80..2b51f8b7cad8 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -490,12 +490,13 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
 }
 
 /*
- * A helper to check if a previous mapping exists. Required for
- * move_page_tables() and realign_addr() to determine if a previous mapping
- * exists before we can do realignment optimizations.
+ * A helper to check if aligning down is OK. The aligned address should fall
+ * on *no mapping*. For the stack moving down, that's a special move within
+ * the VMA that is created to span the source and destination of the move,
+ * so we make an exception for it.
  */
 static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
-			   unsigned long mask)
+			   unsigned long mask, bool for_stack)
 {
 	unsigned long addr_masked = addr_to_align & mask;
 
@@ -504,9 +505,13 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali
 	 * of the corresponding VMA, we can't align down or we will destroy part
 	 * of the current mapping.
 	 */
-	if (vma->vm_start != addr_to_align)
+	if (!for_stack && vma->vm_start != addr_to_align)
 		return false;
 
+	/* In the stack case we explicitly permit in-VMA alignment. */
+	if (for_stack && addr_masked >= vma->vm_start)
+		return true;
+
 	/*
 	 * Make sure the realignment doesn't cause the address to fall on an
 	 * existing mapping.
@@ -517,7 +522,7 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali
 /* Opportunistically realign to specified boundary for faster copy. */
 static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
 			     unsigned long *new_addr, struct vm_area_struct *new_vma,
-			     unsigned long mask)
+			     unsigned long mask, bool for_stack)
 {
 	/* Skip if the addresses are already aligned. */
 	if ((*old_addr & ~mask) == 0)
@@ -528,8 +533,8 @@ static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old
 		return;
 
 	/* Ensure realignment doesn't cause overlap with existing mappings. */
-	if (!can_align_down(old_vma, *old_addr, mask) ||
-	    !can_align_down(new_vma, *new_addr, mask))
+	if (!can_align_down(old_vma, *old_addr, mask, for_stack) ||
+	    !can_align_down(new_vma, *new_addr, mask, for_stack))
 		return;
 
 	*old_addr = *old_addr & mask;
@@ -539,7 +544,7 @@ static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old
 unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len,
-		bool need_rmap_locks)
+		bool need_rmap_locks, bool for_stack)
 {
 	unsigned long extent, old_end;
 	struct mmu_notifier_range range;
@@ -559,9 +564,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 	 * If possible, realign addresses to PMD boundary for faster copy.
 	 * Only realign if the mremap copying hits a PMD boundary.
 	 */
-	if ((vma != new_vma)
-	    && (len >= PMD_SIZE - (old_addr & ~PMD_MASK)))
-		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK);
+	if (len >= PMD_SIZE - (old_addr & ~PMD_MASK))
+		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
+				 for_stack);
 
 	flush_cache_range(vma, old_addr, old_end);
 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
@@ -708,7 +713,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	}
 
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
-				     need_rmap_locks);
+				     need_rmap_locks, false);
 	if (moved_len < old_len) {
 		err = -ENOMEM;
 	} else if (vma->vm_ops && vma->vm_ops->mremap) {
@@ -722,7 +727,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	 * and then proceed to unmap new area instead of old.
 	 */
 	move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
-			 true);
+			 true, false);
 	vma = new_vma;
 	old_len = new_len;
 	old_addr = new_addr;
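The crux of the change above is the relaxed check in can_align_down(). The sketch below is a minimal userspace illustration of that decision, not kernel code: the fake_vma struct, the mask value and main() are invented for the example, and the find_vma_intersection() part of the real check is deliberately left out.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a VMA; only the start address matters here. */
struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_end;
};

/* 2 MiB PMD mask, as on x86-64 with 4 KiB pages. */
#define FAKE_PMD_MASK (~((1UL << 21) - 1))

static bool can_align_down_sketch(const struct fake_vma *vma,
				  unsigned long addr_to_align,
				  unsigned long mask, bool for_stack)
{
	unsigned long addr_masked = addr_to_align & mask;

	/* Regular mremap: only align down from the very start of the VMA. */
	if (!for_stack && vma->vm_start != addr_to_align)
		return false;

	/* Stack move: in-VMA alignment is explicitly permitted. */
	if (for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * The kernel additionally uses find_vma_intersection() to verify that
	 * no other mapping occupies [addr_masked, addr_to_align). With a
	 * single VMA modelled, this sketch assumes the range is free.
	 */
	return true;
}

int main(void)
{
	/* One VMA spanning both source and destination, as in shift_arg_pages(). */
	struct fake_vma stack = { 0x7f0000100000UL, 0x7f0000500000UL };
	unsigned long addr = 0x7f0000301000UL;	/* not PMD-aligned */

	printf("non-stack mremap: %d\n",
	       can_align_down_sketch(&stack, addr, FAKE_PMD_MASK, false));
	printf("stack move:       %d\n",
	       can_align_down_sketch(&stack, addr, FAKE_PMD_MASK, true));
	return 0;
}

With these made-up numbers the non-stack path refuses to realign (the address sits in the middle of the VMA), while the stack path accepts it because the masked address still lies inside the same VMA.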
For the stack move happening in shift_arg_pages(), the move is happening
within the same VMA which spans the old and new ranges.

In case the aligned address happens to fall within that VMA, allow such
moves and don't abort the mremap alignment optimization.

In the regular non-stack mremap case, we cannot allow any such moves, as
they would end up destroying some part of the mapping (either the source
of the move, or part of the existing mapping). So the exception is made
only for stack moves.

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 fs/exec.c          |  2 +-
 include/linux/mm.h |  2 +-
 mm/mremap.c        | 33 +++++++++++++++++++--------------
 3 files changed, 21 insertions(+), 16 deletions(-)
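To make the scenario above concrete, here is a small standalone example with invented addresses (a 2 MiB PMD is assumed; none of the numbers come from a real trace) showing that, once the stack VMA has been grown to span both ranges, aligning either address down to a PMD boundary still lands inside that same VMA:

#include <stdio.h>

#define PMD_SIZE (1UL << 21)		/* 2 MiB */
#define PMD_MASK (~(PMD_SIZE - 1))

int main(void)
{
	/* Hypothetical stack VMA that already spans source and destination. */
	unsigned long vm_start  = 0x7ffff7a00000UL;
	unsigned long old_start = 0x7ffff7c01000UL;	/* source of the move      */
	unsigned long new_start = 0x7ffff7b01000UL;	/* destination of the move */

	unsigned long old_masked = old_start & PMD_MASK;
	unsigned long new_masked = new_start & PMD_MASK;

	/* Both aligned-down addresses remain inside the VMA, so the
	   PMD-alignment optimization in move_page_tables() stays enabled. */
	printf("old: %#lx -> %#lx (inside VMA: %d)\n",
	       old_start, old_masked, old_masked >= vm_start);
	printf("new: %#lx -> %#lx (inside VMA: %d)\n",
	       new_start, new_masked, new_masked >= vm_start);
	return 0;
}

In a regular mremap(), by contrast, the old and new addresses live in different VMAs, so an aligned-down address could fall on an unrelated mapping; that is why the exception is restricted to the stack case.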