@@ -19,9 +19,9 @@
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
const struct cpumask *masks;
- unsigned int queue, cpu;
+ unsigned int queue, cpu, nr_masks;
- masks = group_cpus_evenly(qmap->nr_queues);
+ masks = group_cpus_evenly(qmap->nr_queues, &nr_masks);
if (!masks) {
for_each_possible_cpu(cpu)
qmap->mq_map[cpu] = qmap->queue_offset;
@@ -29,7 +29,7 @@ void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
}
for (queue = 0; queue < qmap->nr_queues; queue++) {
- for_each_cpu(cpu, &masks[queue])
+ for_each_cpu(cpu, &masks[queue % nr_masks])
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
kfree(masks);
@@ -329,20 +329,21 @@ create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
unsigned int this_vecs = affd->set_size[i];
+ unsigned int nr_masks;
int j;
- struct cpumask *result = group_cpus_evenly(this_vecs);
+ struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
if (!result) {
kfree(masks);
return NULL;
}
- for (j = 0; j < this_vecs; j++)
+ for (j = 0; j < nr_masks; j++)
cpumask_copy(&masks[curvec + j], &result[j]);
kfree(result);
- curvec += this_vecs;
- usedvecs += this_vecs;
+ curvec += nr_masks;
+ usedvecs += nr_masks;
}
/* Fill out vectors at the end that don't need affinity */
@@ -862,7 +862,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
{
const struct cpumask *mask, *masks;
- unsigned int q, cpu;
+ unsigned int q, cpu, nr_masks;
/* First attempt to map using existing transport layer affinities
* e.g. PCIe MSI-X
@@ -882,7 +882,7 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f
return;
fallback:
/* Attempt to map evenly in groups over the CPUs */
- masks = group_cpus_evenly(fs->num_request_queues);
+ masks = group_cpus_evenly(fs->num_request_queues, &nr_masks);
/* If even this fails we default to all CPUs use first request queue */
if (!masks) {
for_each_possible_cpu(cpu)
@@ -891,7 +891,7 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f
}
for (q = 0; q < fs->num_request_queues; q++) {
- for_each_cpu(cpu, &masks[q])
+ for_each_cpu(cpu, &masks[q % nr_masks])
fs->mq_map[cpu] = q + VQ_REQUEST;
}
kfree(masks);
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/cpu.h>
-struct cpumask *group_cpus_evenly(unsigned int numgrps);
+struct cpumask *group_cpus_evenly(unsigned int numgrps,
+ unsigned int *nummasks);
#endif
@@ -70,20 +70,21 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
*/
for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
unsigned int this_vecs = affd->set_size[i];
+ unsigned int nr_masks;
int j;
- struct cpumask *result = group_cpus_evenly(this_vecs);
+ struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
if (!result) {
kfree(masks);
return NULL;
}
- for (j = 0; j < this_vecs; j++)
+ for (j = 0; j < nr_masks; j++)
cpumask_copy(&masks[curvec + j].mask, &result[j]);
kfree(result);
- curvec += this_vecs;
- usedvecs += this_vecs;
+ curvec += nr_masks;
+ usedvecs += nr_masks;
}
/* Fill out vectors at the end that don't need affinity */
@@ -332,9 +332,11 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
/**
* group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
* @numgrps: number of groups
+ * @nummasks: number of initialized cpumasks
*
* Return: cpumask array if successful, NULL otherwise. And each element
- * includes CPUs assigned to this group
+ * includes CPUs assigned to this group. nummasks contains the number
+ * of initialized masks which can be less than numgrps.
*
* Try to put close CPUs from viewpoint of CPU and NUMA locality into
* same group, and run two-stage grouping:
@@ -344,7 +346,8 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
* We guarantee in the resulted grouping that all CPUs are covered, and
* no same CPU is assigned to multiple groups
*/
-struct cpumask *group_cpus_evenly(unsigned int numgrps)
+struct cpumask *group_cpus_evenly(unsigned int numgrps,
+ unsigned int *nummasks)
{
unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
cpumask_var_t *node_to_cpumask;
@@ -421,10 +424,12 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
kfree(masks);
return NULL;
}
+ *nummasks = nr_present + nr_others;
return masks;
}
#else /* CONFIG_SMP */
-struct cpumask *group_cpus_evenly(unsigned int numgrps)
+struct cpumask *group_cpus_evenly(unsigned int numgrps,
+ unsigned int *nummasks)
{
struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
@@ -433,6 +438,7 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
/* assign all CPUs(cpu 0) to the 1st group only */
cpumask_copy(&masks[0], cpu_possible_mask);
+ *nummasks = 1;
return masks;
}
#endif /* CONFIG_SMP */
group_cpus_evenly() might allocate fewer groups than requested:

group_cpus_evenly
    __group_cpus_evenly
        alloc_nodes_groups
            # allocated total groups may be less than numgrps when
            # active total CPU number is less than numgrps

In this case, the caller will do an out-of-bounds access because it
assumes the returned masks array has numgrps entries.

Return the number of groups created so the caller can limit the access
range accordingly.

Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
 block/blk-mq-cpumap.c        |  6 +++---
 drivers/virtio/virtio_vdpa.c |  9 +++++----
 fs/fuse/virtio_fs.c          |  6 +++---
 include/linux/group_cpus.h   |  3 ++-
 kernel/irq/affinity.c        |  9 +++++----
 lib/group_cpus.c             | 12 +++++++++---
 6 files changed, 27 insertions(+), 18 deletions(-)
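
Not part of the patch, for illustration only: a minimal userspace sketch of the
pattern the callers switch to. fake_group_cpus_evenly(), NR_CPUS and the plain
unsigned long bitmasks are stand-ins for the real group_cpus_evenly() and
struct cpumask; the point is that when fewer masks come back than queues were
requested, the caller wraps its index with "% nr_masks" instead of walking past
the initialized entries.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4	/* assumption: pretend only 4 CPUs are possible */

/*
 * Hypothetical stand-in for group_cpus_evenly(): it can only build as
 * many groups as there are CPUs, so it may hand back fewer masks than
 * numgrps. Each "mask" is a plain CPU bitmask for the sake of the
 * sketch, and only nr_masks entries are allocated and initialized.
 */
static unsigned long *fake_group_cpus_evenly(unsigned int numgrps,
					     unsigned int *nr_masks)
{
	unsigned int grps = numgrps < NR_CPUS ? numgrps : NR_CPUS;
	unsigned long *masks = calloc(grps, sizeof(*masks));
	unsigned int cpu;

	if (!masks)
		return NULL;

	/* spread the CPUs round-robin over the groups that exist */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		masks[cpu % grps] |= 1UL << cpu;

	*nr_masks = grps;
	return masks;
}

int main(void)
{
	unsigned int nr_queues = 8;	/* more queues than CPUs */
	unsigned int nr_masks, queue, cpu;
	unsigned int mq_map[NR_CPUS];
	unsigned long *masks;

	masks = fake_group_cpus_evenly(nr_queues, &nr_masks);
	if (!masks)
		return 1;

	for (queue = 0; queue < nr_queues; queue++) {
		/*
		 * Indexing masks[queue] here would read masks[4..7],
		 * past the nr_masks entries that were actually set up.
		 * Wrapping with "% nr_masks" keeps the access in range.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (masks[queue % nr_masks] & (1UL << cpu))
				mq_map[cpu] = queue;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u -> queue %u\n", cpu, mq_map[cpu]);

	free(masks);
	return 0;
}

With nr_queues = 8 but only 4 masks returned, every queue index still lands on
an initialized mask; indexing by the raw queue number would have read
masks[4..7].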