@@ -5403,6 +5403,9 @@ struct wiphy_iftype_akm_suites {
*/
struct wiphy {
struct mutex mtx;
+#ifdef CONFIG_LOCKDEP
+ bool mutex_fully_held;
+#endif
/* assign these fields before you register the wiphy */
@@ -5722,22 +5725,105 @@ struct cfg80211_internal_bss;
struct cfg80211_cached_keys;
struct cfg80211_cqm_config;
+/**
+ * wiphy_lock_from_worker - lock the wiphy from worker on cfg80211 workqueue
+ * @wiphy: the wiphy to lock
+ *
+ * If the driver uses the cfg80211 workqueue (see wiphy_queue_work())
+ * and the workers need to lock the wiphy, this version must be used.
+ * Unlike wiphy_lock(), this only takes the mutex directly: queueing
+ * the sync work and waiting for it to start (what wiphy_lock() does)
+ * from a worker already running on the same ordered workqueue could
+ * never make progress.
+ *
+ * NOTE(review): this never sets wiphy->mutex_fully_held, so a worker
+ * locking the wiphy this way will trip the WARN_ON_ONCE() in
+ * wiphy_cancel_work()/wiphy_cancel_delayed_work() — confirm whether
+ * cancelling from workers is meant to be disallowed.
+ *
+ * (Note: this is a macro for the _ONCE part of the warning.)
+ */
+#define wiphy_lock_from_worker(wiphy) do { \
+ __acquire(&(wiphy)->mtx); \
+ mutex_lock(&(wiphy)->mtx); \
+ /* FIXME: can this be done better? */ \
+ WARN_ON_ONCE(strncmp(current->comm, "kworker/", 8)); \
+} while (0)
+
/**
* wiphy_lock - lock the wiphy
* @wiphy: the wiphy to lock
*
- * This is mostly exposed so it can be done around registering and
- * unregistering netdevs that aren't created through cfg80211 calls,
- * since that requires locking in cfg80211 when the notifiers is
- * called, but that cannot differentiate which way it's called.
+ * This is needed around registering and unregistering netdevs that
+ * aren't created through cfg80211 calls, since that requires locking
+ * in cfg80211 when the notifier is called, but the notifier cannot
+ * differentiate which way it's called.
+ *
+ * It can also be used by drivers for their own purposes.
*
* When cfg80211 ops are called, the wiphy is already locked.
+ *
+ * Note that this makes sure that no workers that have been queued
+ * with wiphy_queue_work() are running.
*/
-static inline void wiphy_lock(struct wiphy *wiphy)
- __acquires(&wiphy->mtx)
+void wiphy_lock(struct wiphy *wiphy) __acquires(&wiphy->mtx);
+
+/**
+ * wiphy_queue_work - queue work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @work: the worker
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that it cannot run concurrently
+ * with any wiphy_lock() section, even if it doesn't use
+ * wiphy_lock_from_worker() itself. Therefore, wiphy_cancel_work() can
+ * use just cancel_work() instead of cancel_work_sync(); however, this
+ * requires being in a section protected by wiphy_lock().
+ */
+void wiphy_queue_work(struct wiphy *wiphy, struct work_struct *work);
+
+/**
+ * wiphy_cancel_work - cancel previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to cancel
+ *
+ * Cancel the work *without* waiting for it; this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().  The
+ * non-_sync cancel is sufficient because wiphy_lock() guarantees
+ * that no worker queued via wiphy_queue_work() is running.
+ */
+static inline void wiphy_cancel_work(struct wiphy *wiphy, struct work_struct *work)
+{
+#ifdef CONFIG_LOCKDEP
+ /* mutex_fully_held is only set by wiphy_lock(), not by
+ * wiphy_lock_from_worker(), so holding the mutex alone is
+ * not enough to make the non-_sync cancel below safe
+ */
+ lockdep_assert_held(&wiphy->mtx);
+ WARN_ON_ONCE(!wiphy->mutex_fully_held);
+#endif
+ cancel_work(work);
+}
+
+/**
+ * wiphy_queue_delayed_work - queue delayed work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @dwork: the delayable worker
+ * @delay: number of jiffies to wait before queueing
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that it cannot run concurrently
+ * with any wiphy_lock() section, even if it doesn't use
+ * wiphy_lock_from_worker() itself. Therefore,
+ * wiphy_cancel_delayed_work() can use just cancel_delayed_work()
+ * instead of cancel_delayed_work_sync(); however, this requires
+ * being in a section protected by wiphy_lock().
+ */
+void wiphy_queue_delayed_work(struct wiphy *wiphy,
+ struct delayed_work *dwork,
+ unsigned long delay);
+
+/**
+ * wiphy_cancel_delayed_work - cancel previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to cancel
+ *
+ * Cancel the work *without* waiting for it; this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock(), which
+ * is what makes the non-_sync cancel below sufficient (no queued
+ * worker can be running concurrently).
+ */
+static inline void wiphy_cancel_delayed_work(struct wiphy *wiphy,
+ struct delayed_work *dwork)
 {
- mutex_lock(&wiphy->mtx);
- __acquire(&wiphy->mtx);
+#ifdef CONFIG_LOCKDEP
+ /* see wiphy_cancel_work(): only wiphy_lock() sets the flag */
+ lockdep_assert_held(&wiphy->mtx);
+ WARN_ON_ONCE(!wiphy->mutex_fully_held);
+#endif
+ cancel_delayed_work(dwork);
 }
/**
@@ -5748,6 +5834,9 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
__releases(&wiphy->mtx)
{
__release(&wiphy->mtx);
+#ifdef CONFIG_LOCKDEP
+ /* clear while still holding the mutex, so the flag is
+ * consistent for whoever acquires it next
+ */
+ wiphy->mutex_fully_held = false;
+#endif
mutex_unlock(&wiphy->mtx);
}
@@ -408,6 +408,20 @@ static void cfg80211_propagate_cac_done_wk(struct work_struct *work)
rtnl_unlock();
}
+/*
+ * Barrier work for wiphy_lock(): once this worker blocks on the wiphy
+ * mutex (held by the wiphy_lock() caller), the ordered wiphy workqueue
+ * cannot run any other work, so a wiphy_lock() section excludes all
+ * workers queued via wiphy_queue_work().
+ */
+static void wiphy_work_sync(struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(work, struct cfg80211_registered_device,
+ wq_sync_work);
+
+ /* handshake with wiphy_lock(): signal that we're running ... */
+ complete(&rdev->wq_sync_started);
+ /* ... and wait until it has taken the wiphy mutex */
+ wait_for_completion(&rdev->wq_sync_continue);
+ /* we'll now hang on the lock until the other side unlocks */
+ wiphy_lock_from_worker(&rdev->wiphy);
+ wiphy_unlock(&rdev->wiphy);
+}
+
/* exported functions */
struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
@@ -500,6 +514,11 @@ use_default_name:
}
mutex_init(&rdev->wiphy.mtx);
+ INIT_WORK(&rdev->wq_sync_work, wiphy_work_sync);
+ mutex_init(&rdev->wq_sync_mtx);
+ init_completion(&rdev->wq_sync_started);
+ init_completion(&rdev->wq_sync_continue);
+
INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
INIT_LIST_HEAD(&rdev->beacon_registrations);
spin_lock_init(&rdev->beacon_registrations_lock);
@@ -540,6 +559,12 @@ use_default_name:
return NULL;
}
+ rdev->wq = alloc_ordered_workqueue("%s", 0, dev_name(&rdev->wiphy.dev));
+ if (!rdev->wq) {
+ wiphy_free(&rdev->wiphy);
+ return NULL;
+ }
+
INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work);
INIT_WORK(&rdev->conn_work, cfg80211_conn_work);
INIT_WORK(&rdev->event_work, cfg80211_event_work);
@@ -1073,6 +1098,13 @@ void wiphy_unregister(struct wiphy *wiphy)
wiphy_unlock(&rdev->wiphy);
rtnl_unlock();
+ /*
+ * flush again, even if wiphy_lock() did above, something might
+ * have been reaching it still while the code above was running,
+ * e.g. via debugfs.
+ */
+ flush_workqueue(rdev->wq);
+
flush_work(&rdev->scan_done_wk);
cancel_work_sync(&rdev->conn_work);
flush_work(&rdev->event_work);
@@ -1098,6 +1130,10 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
{
struct cfg80211_internal_bss *scan, *tmp;
struct cfg80211_beacon_registration *reg, *treg;
+
+ if (rdev->wq) /* might be NULL in error cases */
+ destroy_workqueue(rdev->wq);
+
rfkill_destroy(rdev->wiphy.rfkill);
list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) {
list_del(&reg->list);
@@ -1573,6 +1609,66 @@ static struct pernet_operations cfg80211_pernet_ops = {
.exit = cfg80211_pernet_exit,
};
+/*
+ * Lock the wiphy and synchronize with the wiphy workqueue: after this
+ * returns, no work queued via wiphy_queue_work() is running, and none
+ * can run until wiphy_unlock().
+ *
+ * NOTE(review): must not be called from a worker on rdev->wq itself —
+ * waiting for wq_sync_work to start on the ordered queue could never
+ * complete; workers must use wiphy_lock_from_worker() instead.
+ */
+void wiphy_lock(struct wiphy *wiphy)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+ /* lock the sync mutex so we're the only one using the work */
+ mutex_lock(&rdev->wq_sync_mtx);
+ /* flush the work in case it didn't complete yet after lock */
+ flush_work(&rdev->wq_sync_work);
+ /* reinit the completions so we can use them again */
+ reinit_completion(&rdev->wq_sync_started);
+ reinit_completion(&rdev->wq_sync_continue);
+
+ /* queue the work */
+ wiphy_queue_work(wiphy, &rdev->wq_sync_work);
+ /* and wait for it to start */
+ wait_for_completion(&rdev->wq_sync_started);
+
+ /*
+ * Now that the special work is running (we got the completion
+ * from it) actually take the wiphy mutex, if anything is now
+ * on the workqueue it's queued, but not running, and cannot
+ * be trying to take the lock.
+ */
+ mutex_lock(&wiphy->mtx);
+
+ /* and tell the worker to also continue and do that */
+ complete(&rdev->wq_sync_continue);
+
+ /*
+ * No longer need that now, the worker is now stuck waiting for
+ * the mutex we own and anyone else calling wiphy_lock() can get
+ * the wq_sync_mtx, but will wait on flushing the worker first,
+ * then do it all over again...
+ */
+ mutex_unlock(&rdev->wq_sync_mtx);
+
+#ifdef CONFIG_LOCKDEP
+ /* safe to set without wq_sync_mtx: we still hold wiphy->mtx */
+ wiphy->mutex_fully_held = true;
+#endif
+}
+EXPORT_SYMBOL(wiphy_lock);
+
+/* queue work on the per-wiphy ordered workqueue (rdev->wq) */
+void wiphy_queue_work(struct wiphy *wiphy, struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+ queue_work(rdev->wq, work);
+}
+EXPORT_SYMBOL(wiphy_queue_work);
+
+/* queue delayed work on the per-wiphy ordered workqueue (rdev->wq) */
+void wiphy_queue_delayed_work(struct wiphy *wiphy,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+ queue_delayed_work(rdev->wq, dwork, delay);
+}
+EXPORT_SYMBOL(wiphy_queue_delayed_work);
+
static int __init cfg80211_init(void)
{
int err;
@@ -109,6 +109,12 @@ struct cfg80211_registered_device {
/* lock for all wdev lists */
spinlock_t mgmt_registrations_lock;
+ struct workqueue_struct *wq;
+ struct mutex wq_sync_mtx;
+ struct completion wq_sync_started;
+ struct completion wq_sync_continue;
+ struct work_struct wq_sync_work;
+
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __aligned(NETDEV_ALIGN);