@@ -39,7 +39,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm) {
flush_tlb_mm(tlb->mm);
- } else if (tlb->end > 0) {
+ } else {
struct vm_area_struct vma = { .vm_mm = tlb->mm, };
flush_tlb_range(&vma, tlb->start, tlb->end);
}
@@ -96,10 +96,9 @@ struct mmu_gather {
#endif
unsigned long start;
unsigned long end;
- unsigned int need_flush : 1, /* Did free PTEs */
/* we are in the middle of an operation to clear
* a full mm and can make some optimizations */
- fullmm : 1,
+ unsigned int fullmm : 1,
/* we have performed an operation which
* requires a complete flush of the tlb */
need_flush_all : 1;
@@ -131,10 +130,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address)
{
- if (!tlb->fullmm) {
- tlb->start = min(tlb->start, address);
- tlb->end = max(tlb->end, address + PAGE_SIZE);
- }
+ tlb->start = min(tlb->start, address);
+ tlb->end = max(tlb->end, address + PAGE_SIZE);
}
static inline void __tlb_reset_range(struct mmu_gather *tlb)
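With the fullmm test gone, __tlb_adjust_range() always widens the gathered range, and a non-zero tlb->end becomes the sole indication that a flush is pending. Below is a minimal user-space model of that convention, not kernel code; it assumes __tlb_reset_range() marks the range empty by setting start above every address and end to 0 (the PAGE_SIZE and TASK_SIZE values here are placeholders):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define TASK_SIZE       (~0UL)          /* placeholder: above any user address */
#define min(a, b)       ((a) < (b) ? (a) : (b))
#define max(a, b)       ((a) > (b) ? (a) : (b))

/* Cut-down stand-in for the kernel's struct mmu_gather. */
struct mmu_gather {
        unsigned long start, end;
};

static void reset_range(struct mmu_gather *tlb)
{
        tlb->start = TASK_SIZE;         /* empty: start > end */
        tlb->end = 0;                   /* end == 0 => nothing to flush */
}

static void adjust_range(struct mmu_gather *tlb, unsigned long address)
{
        tlb->start = min(tlb->start, address);
        tlb->end = max(tlb->end, address + PAGE_SIZE);
}

int main(void)
{
        struct mmu_gather tlb;

        reset_range(&tlb);
        printf("flush pending: %d\n", tlb.end != 0);            /* 0 */

        adjust_range(&tlb, 0x400000);
        adjust_range(&tlb, 0x7000);
        printf("flush pending: %d, range %#lx-%#lx\n",
               tlb.end != 0, tlb.start, tlb.end);               /* 1, 0x7000-0x401000 */
        return 0;
}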
@@ -154,7 +151,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
#define __tlb_end_vma(tlb, vma) \
do { \
- if (!tlb->fullmm) { \
+ if (!tlb->fullmm && tlb->end) { \
tlb_flush(tlb); \
__tlb_reset_range(tlb); \
} \
@@ -171,13 +168,12 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
+ * Record the fact that pte's were really unmapped by updating the range,
+ * so we can later optimise away the tlb invalidate. This helps when
+ * userspace is unmapping already-unmapped pages, which happens quite a lot.
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
- tlb->need_flush = 1; \
__tlb_adjust_range(tlb, address); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
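With the explicit need_flush write gone from these macros, a non-zero tlb->end is the only record that entries were gathered, so callers are expected to record the PTE before queueing its page. A hedged, hypothetical fragment of such an unmap loop (illustration only, not the kernel's actual zap code):

        /* Hypothetical unmap-loop body; illustration only. */
        pte_t pteval = ptep_get_and_clear(mm, addr, ptep);      /* clear the PTE */
        tlb_remove_tlb_entry(tlb, ptep, addr);                  /* widen start..end */
        tlb_remove_page(tlb, pte_page(pteval));                 /* end != 0 by now */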
@@ -192,14 +188,12 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
- tlb->need_flush = 1; \
__tlb_adjust_range(tlb, address); \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
#define pte_free_tlb(tlb, ptep, address) \
do { \
- tlb->need_flush = 1; \
__tlb_adjust_range(tlb, address); \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
@@ -207,7 +201,6 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address) \
do { \
- tlb->need_flush = 1; \
__tlb_adjust_range(tlb, address); \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
@@ -215,7 +208,6 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- tlb->need_flush = 1; \
__tlb_adjust_range(tlb, address); \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
@@ -220,7 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
/* Is it from 0 to ~0? */
tlb->fullmm = !(start | (end+1));
tlb->need_flush_all = 0;
- tlb->need_flush = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
tlb->local.max = ARRAY_SIZE(tlb->__pages);
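The fullmm test above relies on a small bit trick: start | (end + 1) is zero only when start == 0 and end == ~0UL, i.e. the caller asked to tear down the whole address space. A quick user-space check of that identity:

#include <stdio.h>

int main(void)
{
        /* tlb->fullmm = !(start | (end + 1)) is 1 only for the 0..~0UL span. */
        unsigned long full_start = 0, full_end = ~0UL;
        unsigned long part_start = 0x400000, part_end = 0x800000;

        printf("full mm:    %d\n", !(full_start | (full_end + 1)));     /* 1 */
        printf("partial mm: %d\n", !(part_start | (part_end + 1)));     /* 0 */
        return 0;
}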
@@ -236,7 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
- tlb->need_flush = 0;
+ if (!tlb->end)
+ return;
+
tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
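For the new early return to stay correct across repeated flushes, the gathered range must go back to "empty" once the flush has been issued; in this series that is presumably the job of __tlb_reset_range() at the end of the function. A hedged sketch of the resulting shape (the CONFIG_HAVE_RCU_TABLE_FREE table flush and other details are elided):

/* Sketch only, not the complete function from the patch. */
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        if (!tlb->end)                  /* nothing gathered since the last reset */
                return;

        tlb_flush(tlb);                 /* arch hook: invalidate start..end (or the mm) */
        __tlb_reset_range(tlb);         /* end == 0 again until new entries arrive */
}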
@@ -257,8 +258,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
void tlb_flush_mmu(struct mmu_gather *tlb)
{
- if (!tlb->need_flush)
- return;
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
@@ -293,7 +292,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
struct mmu_gather_batch *batch;
- VM_BUG_ON(!tlb->need_flush);
+ VM_BUG_ON(!tlb->end);
batch = tlb->active;
batch->pages[batch->nr++] = page;
@@ -360,8 +359,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
- tlb->need_flush = 1;
-
/*
* When there's less than two users of this mm there cannot be a
* concurrent page-table walk.
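The comment motivates a single-user fast path: if nothing else can be walking these page tables, the table page can be freed immediately rather than batched behind an RCU grace period. A hedged sketch of that kind of check, as it might continue in the body of tlb_remove_table() (assuming the arch-provided __tlb_remove_table() hook; the batching slow path is omitted):

        if (atomic_read(&tlb->mm->mm_users) < 2) {
                __tlb_remove_table(table);      /* free the page-table page directly */
                return;
        }
        /* ... otherwise queue the table in *batch for an RCU-deferred free ... */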