
[2/2] mm: page_alloc: drain pages remotely

Message ID 20200616161409.299575008@fuller.cnet
State: New
Series: re-enable remote per-cpu-pages draining

Commit Message

Marcelo Tosatti June 16, 2020, 4:11 p.m. UTC
Remote draining of pages was removed from 5.6-rt.

Unfortunately it is necessary for use-cases which have a busy-spinning
SCHED_FIFO thread on an isolated CPU. The drain work that drain_all_pages()
queues on such a CPU never gets to run, so flush_work() blocks indefinitely:

[ 7475.821066] INFO: task ld:274531 blocked for more than 600 seconds.
[ 7475.822157]       Not tainted 4.18.0-208.rt5.20.el8.x86_64 #1
[ 7475.823094] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 7475.824392] ld              D    0 274531 274530 0x00084080
[ 7475.825307] Call Trace:
[ 7475.825761]  __schedule+0x342/0x850
[ 7475.826377]  schedule+0x39/0xd0
[ 7475.826923]  schedule_timeout+0x20e/0x410
[ 7475.827610]  ? __schedule+0x34a/0x850
[ 7475.828247]  ? ___preempt_schedule+0x16/0x18
[ 7475.828953]  wait_for_completion+0x85/0xe0
[ 7475.829653]  flush_work+0x11a/0x1c0
[ 7475.830313]  ? flush_workqueue_prep_pwqs+0x130/0x130
[ 7475.831148]  drain_all_pages+0x140/0x190
[ 7475.831803]  __alloc_pages_slowpath+0x3f8/0xe20
[ 7475.832571]  ? mem_cgroup_commit_charge+0xcb/0x510
[ 7475.833371]  __alloc_pages_nodemask+0x1ca/0x2b0
[ 7475.834134]  pagecache_get_page+0xb5/0x2d0
[ 7475.834814]  ? account_page_dirtied+0x11a/0x220
[ 7475.835579]  grab_cache_page_write_begin+0x1f/0x40
[ 7475.836379]  iomap_write_begin.constprop.44+0x1c1/0x370
[ 7475.837241]  ? iomap_write_end+0x91/0x290
[ 7475.837911]  iomap_write_actor+0x92/0x170
...

So enable remote draining again.

The original commit message is:

    mm: page_alloc: rt-friendly per-cpu pages
    
    rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
    method into a preemptible, explicit-per-cpu-locks method.
    
    Contains fixes from:
             Peter Zijlstra <a.p.zijlstra@chello.nl>
             Thomas Gleixner <tglx@linutronix.de>

From: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

---
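Not part of the patch itself: a minimal userspace analogue of the
explicit-per-cpu-locks method described above, for illustration only.
pthread mutexes stand in for the per-CPU pa_lock, and all names here
(pcp_cache, free_to_pcp, drain_remote) are made up for this sketch.
The point is that any thread can take another CPU's lock and empty that
CPU's page cache directly, so draining no longer depends on a work item
getting scheduled on the target CPU.

/* Build with: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

/* Analogue of a per-CPU pageset: a counter protected by its own lock. */
struct pcp_cache {
	pthread_mutex_t lock;	/* stands in for the per-CPU pa_lock */
	int count;		/* pages cached on this "CPU" */
};

static struct pcp_cache pcp[NR_CPUS];

/* Fast path: a CPU frees a page into its own cache under its own lock. */
static void free_to_pcp(int cpu)
{
	pthread_mutex_lock(&pcp[cpu].lock);
	pcp[cpu].count++;
	pthread_mutex_unlock(&pcp[cpu].lock);
}

/*
 * Remote drain: runs entirely on the calling thread.  Because each
 * cache has an explicit lock, the caller does not need the owning CPU
 * to schedule anything - unlike the flush_work() scheme, which blocks
 * until the target CPU runs its drain work item.
 */
static int drain_remote(int cpu)
{
	int drained;

	pthread_mutex_lock(&pcp[cpu].lock);
	drained = pcp[cpu].count;
	pcp[cpu].count = 0;
	pthread_mutex_unlock(&pcp[cpu].lock);

	return drained;
}

int main(void)
{
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_init(&pcp[cpu].lock, NULL);
		for (i = 0; i <= cpu; i++)
			free_to_pcp(cpu);
	}

	/* "drain_all_pages": drain every CPU's cache from this thread. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: drained %d pages\n", cpu, drain_remote(cpu));

	return 0;
}

In the kernel patch below the same effect comes from
local_lock_irqsave_on(pa_lock, flags, cpu) under CONFIG_PREEMPT_RT,
while the !PREEMPT_RT build keeps the existing local_irq_save() plus
workqueue scheme.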
 mm/page_alloc.c |   21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

Patch

Index: linux-rt-devel/mm/page_alloc.c
===================================================================
--- linux-rt-devel.orig/mm/page_alloc.c
+++ linux-rt-devel/mm/page_alloc.c
@@ -360,6 +360,16 @@  EXPORT_SYMBOL(nr_online_nodes);
 
 static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
 
+#ifdef CONFIG_PREEMPT_RT
+# define cpu_lock_irqsave(cpu, flags)          \
+	local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags)     \
+	local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
+#endif
+
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -2852,7 +2862,7 @@  static void drain_pages_zone(unsigned in
 	LIST_HEAD(dst);
 	int count;
 
-	local_lock_irqsave(pa_lock, flags);
+	cpu_lock_irqsave(cpu, flags);
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
@@ -2860,7 +2870,7 @@  static void drain_pages_zone(unsigned in
 	if (count)
 		isolate_pcp_pages(count, pcp, &dst);
 
-	local_unlock_irqrestore(pa_lock, flags);
+	cpu_unlock_irqrestore(cpu, flags);
 
 	if (count)
 		free_pcppages_bulk(zone, &dst, false);
@@ -2898,6 +2908,7 @@  void drain_local_pages(struct zone *zone
 		drain_pages(cpu);
 }
 
+#ifndef CONFIG_PREEMPT_RT
 static void drain_local_pages_wq(struct work_struct *work)
 {
 	struct pcpu_drain *drain;
@@ -2915,6 +2926,7 @@  static void drain_local_pages_wq(struct
 	drain_local_pages(drain->zone);
 	migrate_enable();
 }
+#endif
 
 /*
  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
@@ -2982,6 +2994,7 @@  void drain_all_pages(struct zone *zone)
 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
 
+#ifndef CONFIG_PREEMPT_RT
 	for_each_cpu(cpu, &cpus_with_pcps) {
 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
 
@@ -2991,6 +3004,10 @@  void drain_all_pages(struct zone *zone)
 	}
 	for_each_cpu(cpu, &cpus_with_pcps)
 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
+#else
+	for_each_cpu(cpu, &cpus_with_pcps)
+		drain_pages(cpu);
+#endif
 
 	mutex_unlock(&pcpu_drain_mutex);
 }