--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1452,9 +1452,10 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
 	struct fcoe_percpu_s *fps;
 	int rc;
 
-	fps = &get_cpu_var(fcoe_percpu);
+	local_lock(&fcoe_percpu.lock);
+	fps = this_cpu_ptr(&fcoe_percpu);
 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
-	put_cpu_var(fcoe_percpu);
+	local_unlock(&fcoe_percpu.lock);
 
 	return rc;
 }
@@ -2487,6 +2488,7 @@ static int __init fcoe_init(void)
 		p = per_cpu_ptr(&fcoe_percpu, cpu);
 		INIT_WORK(&p->work, fcoe_receive_work);
 		skb_queue_head_init(&p->fcoe_rx_list);
+		local_lock_init(&p->lock);
 	}
 
 	/* Setup link change notification */
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -14,6 +14,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
+#include <linux/local_lock.h>
 #include <linux/random.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/libfc.h>
@@ -326,6 +327,7 @@ struct fcoe_percpu_s {
 	struct sk_buff_head fcoe_rx_list;
 	struct page *crc_eof_page;
 	int crc_eof_offset;
+	local_lock_t lock;
 };
 
 /**
fcoe_get_paged_crc_eof() relies on the caller disabling preemption so that the per-CPU fcoe_percpu context remains valid throughout the call. Callers ensure this either by holding a spinlock (such as bnx2fc_global_lock or qedf_global_lock) or via the get_cpu() in fcoe_alloc_paged_crc_eof(). The latter breaks PREEMPT_RT semantics: fcoe_get_paged_crc_eof() can allocate memory and therefore sleep while in atomic context.

Introduce a local_lock_t in struct fcoe_percpu. This keeps the non-RT case the same, as local_lock() maps to preempt_disable()/preempt_enable(), while on PREEMPT_RT it takes a per-CPU spinlock, allowing the region to be preempted but still maintaining CPU locality. The other users of fcoe_percpu are already safe in this regard and do not require local_lock()ing.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
---
 drivers/scsi/fcoe/fcoe.c | 6 ++++--
 include/scsi/libfcoe.h   | 2 ++
 2 files changed, 6 insertions(+), 2 deletions(-)
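For readers unfamiliar with local_lock: below is a minimal, self-contained sketch of the pattern this patch adopts. The foo_* names are hypothetical and not part of the patch; they only illustrate how a local_lock_t embedded in a per-CPU struct is declared and used.

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct foo_percpu {
		local_lock_t lock;
		int counter;
	};

	/* Static initialization; not how this patch does it, see below. */
	static DEFINE_PER_CPU(struct foo_percpu, foo_percpu) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void foo_update(void)
	{
		struct foo_percpu *p;

		/*
		 * !PREEMPT_RT: disables preemption, as get_cpu_var() did.
		 * PREEMPT_RT: acquires this CPU's spinlock; the section may
		 * be preempted, but migration is prevented, so @p stays
		 * valid and CPU-local throughout.
		 */
		local_lock(&foo_percpu.lock);
		p = this_cpu_ptr(&foo_percpu);
		p->counter++;
		local_unlock(&foo_percpu.lock);
	}

The patch itself initializes the lock at runtime with local_lock_init() in fcoe_init() rather than statically; the two forms are equivalent, and the static initializer above merely keeps the sketch compact.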