
irqdomain: move revmap_trees_mutex to struct irq_domain

Message ID 1507128419-7543-1-git-send-email-yamada.masahiro@socionext.com
State Superseded
Series irqdomain: move revmap_trees_mutex to struct irq_domain

Commit Message

Masahiro Yamada Oct. 4, 2017, 2:46 p.m. UTC
The global revmap_trees_mutex protects domain->revmap_tree, but it is
safe to modify the revmap_tree of two different domains concurrently.
Change the global mutex to a per-domain mutex.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>

---
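
As an illustration of the change (not part of the patch), here is a
minimal user-space sketch of the per-domain locking pattern, using
pthread mutexes as a stand-in for the kernel's struct mutex. The
fake_domain type and its counter are hypothetical simplifications of
struct irq_domain and its revmap_tree radix tree.

/*
 * Illustrative sketch only: pthread_mutex_t stands in for the kernel's
 * struct mutex, and a plain counter stands in for the revmap_tree
 * radix tree. Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct fake_domain {
	pthread_mutex_t revmap_tree_mutex;	/* per-domain, as in the patch */
	long revmap_entries;			/* stand-in for revmap_tree */
};

/* With a per-domain mutex, only updaters of the same domain contend. */
static void domain_set_mapping(struct fake_domain *d)
{
	pthread_mutex_lock(&d->revmap_tree_mutex);
	d->revmap_entries++;
	pthread_mutex_unlock(&d->revmap_tree_mutex);
}

static void *updater(void *arg)
{
	for (int i = 0; i < 1000000; i++)
		domain_set_mapping(arg);
	return NULL;
}

int main(void)
{
	/* Two independent domains: updates to one never block the other. */
	struct fake_domain a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct fake_domain b = { PTHREAD_MUTEX_INITIALIZER, 0 };
	pthread_t ta, tb;

	pthread_create(&ta, NULL, updater, &a);
	pthread_create(&tb, NULL, updater, &b);
	pthread_join(ta, NULL);
	pthread_join(tb, NULL);
	printf("a=%ld b=%ld\n", a.revmap_entries, b.revmap_entries);
	return 0;
}

With the old global revmap_trees_mutex, the two updater threads would
serialize against each other even though they touch unrelated trees;
with a per-domain mutex they only contend within a single domain.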

 include/linux/irqdomain.h |  2 ++
 kernel/irq/irqdomain.c    | 14 +++++++-------
 2 files changed, 9 insertions(+), 7 deletions(-)

-- 
2.7.4

Comments

Marc Zyngier Oct. 4, 2017, 2:59 p.m. UTC | #1
On 04/10/17 15:46, Masahiro Yamada wrote:
> The global revmap_trees_mutex protects domain->revmap_tree, but it is
> safe to modify the revmap_tree of two different domains concurrently.
> Change the global mutex to a per-domain mutex.
> 
> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>

Whilst this doesn't seem completely outlandish from a data-structure
PoV, I really wonder if that's an actual bottleneck. Do you have a
use-case where the rate of interrupts being mapped/unmapped is so high
that this becomes an actual contention point? If so, it would be good
to mention it in the commit message.

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...

Patch

diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 81e4889..56b68b0 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -32,6 +32,7 @@ 
 #include <linux/types.h>
 #include <linux/irqhandler.h>
 #include <linux/of.h>
+#include <linux/mutex.h>
 #include <linux/radix-tree.h>
 
 struct device_node;
@@ -172,6 +173,7 @@ struct irq_domain {
 	unsigned int revmap_direct_max_irq;
 	unsigned int revmap_size;
 	struct radix_tree_root revmap_tree;
+	struct mutex revmap_tree_mutex;
 	unsigned int linear_revmap[];
 };
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index ac4644e..7870800 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -21,7 +21,6 @@ 
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
-static DEFINE_MUTEX(revmap_trees_mutex);
 static struct irq_domain *irq_default_domain;
 
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
@@ -211,6 +210,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 
 	/* Fill structure */
 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
+	mutex_init(&domain->revmap_tree_mutex);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->hwirq_max = hwirq_max;
@@ -462,9 +462,9 @@ static void irq_domain_clear_mapping(struct irq_domain *domain,
 	if (hwirq < domain->revmap_size) {
 		domain->linear_revmap[hwirq] = 0;
 	} else {
-		mutex_lock(&revmap_trees_mutex);
+		mutex_lock(&domain->revmap_tree_mutex);
 		radix_tree_delete(&domain->revmap_tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
+		mutex_unlock(&domain->revmap_tree_mutex);
 	}
 }
 
@@ -475,9 +475,9 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
 	if (hwirq < domain->revmap_size) {
 		domain->linear_revmap[hwirq] = irq_data->irq;
 	} else {
-		mutex_lock(&revmap_trees_mutex);
+		mutex_lock(&domain->revmap_tree_mutex);
 		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
+		mutex_unlock(&domain->revmap_tree_mutex);
 	}
 }
 
@@ -1459,11 +1459,11 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 		return; /* Not using radix tree. */
 
 	/* Fix up the revmap. */
-	mutex_lock(&revmap_trees_mutex);
+	mutex_lock(&d->domain->revmap_tree_mutex);
 	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
 	if (slot)
 		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
-	mutex_unlock(&revmap_trees_mutex);
+	mutex_unlock(&d->domain->revmap_tree_mutex);
 }
 
 /**