@@ -250,6 +250,7 @@ static int setup_demand_paging(struct kvm_vm *vm,
struct test_params {
bool use_uffd;
useconds_t uffd_delay;
+ bool partition_vcpu_memory_access;
};
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -277,7 +278,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
- perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+ perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
+ p->partition_vcpu_memory_access);
if (p->use_uffd) {
uffd_handler_threads =
@@ -293,10 +295,19 @@ static void run_test(enum vm_guest_mode mode, void *arg)
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vm_paddr_t vcpu_gpa;
void *vcpu_hva;
+ uint64_t vcpu_mem_size;
- vcpu_gpa = guest_test_phys_mem + (vcpu_id * guest_percpu_mem_size);
+
+ if (p->partition_vcpu_memory_access) {
+ vcpu_gpa = guest_test_phys_mem +
+ (vcpu_id * guest_percpu_mem_size);
+ vcpu_mem_size = guest_percpu_mem_size;
+ } else {
+ vcpu_gpa = guest_test_phys_mem;
+ vcpu_mem_size = guest_percpu_mem_size * nr_vcpus;
+ }
PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
- vcpu_id, vcpu_gpa, vcpu_gpa + guest_percpu_mem_size);
+ vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
/* Cache the HVA pointer of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
@@ -313,7 +324,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
&uffd_handler_threads[vcpu_id],
pipefds[vcpu_id * 2],
p->uffd_delay, &uffd_args[vcpu_id],
- vcpu_hva, guest_percpu_mem_size);
+ vcpu_hva, vcpu_mem_size);
if (r < 0)
exit(-r);
}
@@ -376,7 +387,7 @@ static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
- " [-b memory] [-v vcpus]\n", name);
+ " [-b memory] [-v vcpus] [-o]\n", name);
guest_modes_help();
printf(" -u: use User Fault FD to handle vCPU page\n"
" faults.\n");
@@ -387,6 +398,8 @@ static void help(char *name)
" demand paged by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
printf(" -v: specify the number of vCPUs to run.\n");
+ printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+ " them into a separate region of memory for each vCPU.\n");
puts("");
exit(0);
}
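For orientation (commentary, not part of the patch): with the option wired up in the getopt handling below, the demand paging test, presumably built as ./demand_paging_test per the usual kselftest naming, would take the new flag alongside the existing ones, e.g.

    ./demand_paging_test -v 8 -b 512M -o

which, per the -o help text added above, makes all eight vCPUs fault on one shared 4 GiB region instead of eight private 512 MiB slices.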
@@ -394,12 +407,14 @@ static void help(char *name)
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
- struct test_params p = {};
+ struct test_params p = {
+ .partition_vcpu_memory_access = true,
+ };
int opt;
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
@@ -419,6 +434,9 @@ int main(int argc, char *argv[])
TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
+ case 'o':
+ p.partition_vcpu_memory_access = false;
+ break;
case 'h':
default:
help(argv[0]);
@@ -92,6 +92,7 @@ struct test_params {
unsigned long iterations;
uint64_t phys_offset;
int wr_fract;
+ bool partition_vcpu_memory_access;
};
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -129,7 +130,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
- perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+ perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
+ p->partition_vcpu_memory_access);
sync_global_to_guest(vm, perf_test_args);
@@ -240,7 +242,7 @@ static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-i iterations] [-p offset] "
- "[-m mode] [-b vcpu bytes] [-v vcpus]\n", name);
+ "[-m mode] [-b vcpu bytes] [-v vcpus] [-o]\n", name);
puts("");
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
@@ -255,6 +257,8 @@ static void help(char *name)
" 1/<fraction of pages to write>.\n"
" (default: 1 i.e. all pages are written to.)\n");
printf(" -v: specify the number of vCPUs to run.\n");
+ printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+ " them into a separate region of memory for each vCPU.\n");
puts("");
exit(0);
}
@@ -265,6 +269,7 @@ int main(int argc, char *argv[])
struct test_params p = {
.iterations = TEST_HOST_LOOP_N,
.wr_fract = 1,
+ .partition_vcpu_memory_access = true,
};
int opt;
@@ -275,7 +280,7 @@ int main(int argc, char *argv[])
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) {
+ while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:o")) != -1) {
switch (opt) {
case 'i':
p.iterations = atoi(optarg);
@@ -299,6 +304,9 @@ int main(int argc, char *argv[])
TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
+ case 'o':
+ p.partition_vcpu_memory_access = false;
+ break;
case 'h':
default:
help(argv[0]);
@@ -46,6 +46,8 @@ extern uint64_t guest_test_phys_mem;
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
uint64_t vcpu_memory_bytes);
void perf_test_destroy_vm(struct kvm_vm *vm);
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes);
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+ uint64_t vcpu_memory_bytes,
+ bool partition_vcpu_memory_access);
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
@@ -112,7 +112,9 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
kvm_vm_free(vm);
}
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+ uint64_t vcpu_memory_bytes,
+ bool partition_vcpu_memory_access)
{
vm_paddr_t vcpu_gpa;
struct perf_test_vcpu_args *vcpu_args;
@@ -122,13 +124,22 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_by
vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
vcpu_args->vcpu_id = vcpu_id;
- vcpu_args->gva = guest_test_virt_mem +
- (vcpu_id * vcpu_memory_bytes);
- vcpu_args->pages = vcpu_memory_bytes /
- perf_test_args.guest_page_size;
+ if (partition_vcpu_memory_access) {
+ vcpu_args->gva = guest_test_virt_mem +
+ (vcpu_id * vcpu_memory_bytes);
+ vcpu_args->pages = vcpu_memory_bytes /
+ perf_test_args.guest_page_size;
+ vcpu_gpa = guest_test_phys_mem +
+ (vcpu_id * vcpu_memory_bytes);
+ } else {
+ vcpu_args->gva = guest_test_virt_mem;
+ vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
+ perf_test_args.guest_page_size;
+ vcpu_gpa = guest_test_phys_mem;
+ }
- vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
- vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+ vcpu_id, vcpu_gpa, vcpu_gpa +
+ (vcpu_args->pages * perf_test_args.guest_page_size));
}
}
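As a closing aside (again commentary, not part of the patch): the behavioral difference behind -o is entirely in the per-vCPU address computation shown above. The standalone sketch below replays that computation so the two layouts can be printed side by side; guest_test_virt_mem and guest_page_size here are stand-in constants, not the selftest globals of the same name, and the real tests derive both from the VM configuration.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in constants; the selftests derive these from the VM setup. */
static const uint64_t guest_test_virt_mem = 0x100000000;
static const uint64_t guest_page_size = 4096;

/*
 * Mirror of the per-vCPU region assignment added by the patch: with
 * partitioning (the default), each vCPU gets its own slice of guest
 * memory; with -o (overlap), every vCPU is pointed at one range that
 * covers all of guest test memory.
 */
static void show_vcpu_regions(int vcpus, uint64_t vcpu_memory_bytes,
			      bool partition_vcpu_memory_access)
{
	int vcpu_id;

	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
		uint64_t gva, pages;

		if (partition_vcpu_memory_access) {
			gva = guest_test_virt_mem +
			      (vcpu_id * vcpu_memory_bytes);
			pages = vcpu_memory_bytes / guest_page_size;
		} else {
			gva = guest_test_virt_mem;
			pages = (vcpus * vcpu_memory_bytes) / guest_page_size;
		}

		printf("vCPU %d: gva [0x%" PRIx64 ", 0x%" PRIx64 "), %" PRIu64 " pages\n",
		       vcpu_id, gva, gva + pages * guest_page_size, pages);
	}
}

int main(void)
{
	/* 4 vCPUs, 1 GiB each: partitioned layout, then overlapped (-o). */
	show_vcpu_regions(4, 1ULL << 30, true);
	show_vcpu_regions(4, 1ULL << 30, false);

	return 0;
}

With these numbers, the partitioned run prints four disjoint 1 GiB ranges while the overlapped run prints the same 4 GiB range for every vCPU, matching what perf_test_setup_vcpus() now sets up when the tests are run with -o.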