--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -72,7 +72,7 @@ void notrace stop_machine_yield(const struct cpumask *cpumask)
this_cpu = smp_processor_id();
if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
__this_cpu_write(cpu_relax_retry, 0);
- cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
+ cpu = cpumask_next_wrap_old(this_cpu, cpumask, this_cpu, false);
if (cpu >= nr_cpu_ids)
return;
if (arch_vcpu_is_preempted(cpu))
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1578,7 +1578,7 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
if (wq_unbound)
queue->io_cpu = WORK_CPU_UNBOUND;
else
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ queue->io_cpu = cpumask_next_wrap_old(n - 1, cpu_online_mask, -1, false);
}

static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1757,7 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);

- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
+ cpu_next = cpumask_next_wrap_old(cpu_next, cpu_online_mask, nr_cpu_ids,
false);
cpu = cpu_next;
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -12876,7 +12876,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
if (offline) {
/* Find next online CPU on original mask */
- cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_next = cpumask_next_wrap_old(cpu, orig_mask, cpu, true);
cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

/* Found a valid CPU */
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -296,7 +296,7 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
#if NR_CPUS == 1
static __always_inline
-unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+unsigned int cpumask_next_wrap_old(int n, const struct cpumask *mask, int start, bool wrap)
{
cpumask_check(start);
if (n != -1)
@@ -312,7 +312,7 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, boo
return cpumask_first(mask);
}
#else
-unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+unsigned int __pure cpumask_next_wrap_old(int n, const struct cpumask *mask, int start, bool wrap);
#endif

/**
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -274,7 +274,7 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
if (remove_object) {
list_del_init(&padata->list);
++pd->processed;
- pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
+ pd->cpu = cpumask_next_wrap_old(cpu, pd->cpumask.pcpu, -1, false);
}

spin_unlock(&reorder->lock);
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -8,7 +8,7 @@
#include <linux/numa.h>

/**
- * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * cpumask_next_wrap_old - helper to implement for_each_cpu_wrap
* @n: the cpu prior to the place to search
* @mask: the cpumask pointer
* @start: the start point of the iteration
@@ -19,7 +19,7 @@
* Note: the @wrap argument is required for the start condition when
* we cannot assume @start is set in @mask.
*/
-unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+unsigned int cpumask_next_wrap_old(int n, const struct cpumask *mask, int start, bool wrap)
{
unsigned int next;
@@ -37,7 +37,7 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, boo
return next;
}
-EXPORT_SYMBOL(cpumask_next_wrap);
+EXPORT_SYMBOL(cpumask_next_wrap_old);

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
The next patch aligns the implementation of cpumask_next_wrap() with the
generic version in find.h, which changes the function signature. To make
the transition smooth, this patch deprecates the current implementation
by adding an _old suffix. The following patches then switch the existing
users over to the new implementation one by one.

No functional changes intended.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
---
 arch/s390/kernel/processor.c        | 2 +-
 drivers/nvme/host/tcp.c             | 2 +-
 drivers/pci/controller/pci-hyperv.c | 2 +-
 drivers/scsi/lpfc/lpfc_init.c       | 2 +-
 include/linux/cpumask.h             | 4 ++--
 kernel/padata.c                     | 2 +-
 lib/cpumask.c                       | 6 +++---
 7 files changed, 10 insertions(+), 10 deletions(-)
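
For reviewers, a sketch of where this series is heading: the find.h
convention suggests the new cpumask_next_wrap() will be a thin wrapper
around find_next_bit_wrap(). The shape below is an assumption for
illustration only; the real helper lands in the next patch and its
exact signature may differ:

	/*
	 * Assumed shape of the new helper (not part of this patch).
	 * Wrapping is implied by find_next_bit_wrap(), so the separate
	 * @start/@wrap bookkeeping of the _old variant goes away.
	 */
	static __always_inline
	unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
	{
		/* -1 is a legal argument: search from the beginning. */
		if (n != -1)
			cpumask_check(n);

		return find_next_bit_wrap(cpumask_bits(src),
					  small_cpumask_bits, n + 1);
	}

With such a signature, a call site like the padata one above would go
from cpumask_next_wrap_old(cpu, pd->cpumask.pcpu, -1, false) to simply
cpumask_next_wrap(cpu, pd->cpumask.pcpu).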