@@ -160,8 +160,10 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
static NotifierList kvm_irqchip_change_notifiers =
NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
-#define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
-#define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
+static QemuMutex kml_slots_lock;
+
+#define kvm_slots_lock() qemu_mutex_lock(&kml_slots_lock)
+#define kvm_slots_unlock() qemu_mutex_unlock(&kml_slots_lock)
int kvm_get_max_memslots(void)
{
@@ -211,9 +213,9 @@ bool kvm_has_free_slot(MachineState *ms)
bool result;
KVMMemoryListener *kml = &s->memory_listener;
- kvm_slots_lock(kml);
+ kvm_slots_lock();
result = !!kvm_get_free_slot(kml);
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
return result;
}
@@ -279,7 +281,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
KVMMemoryListener *kml = &s->memory_listener;
int i, ret = 0;
- kvm_slots_lock(kml);
+ kvm_slots_lock();
for (i = 0; i < s->nr_slots; i++) {
KVMSlot *mem = &kml->slots[i];
@@ -289,7 +291,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
break;
}
}
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
return ret;
}
@@ -468,7 +470,7 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
return 0;
}
- kvm_slots_lock(kml);
+ kvm_slots_lock();
while (size && !ret) {
slot_size = MIN(kvm_max_slot_size, size);
@@ -484,7 +486,7 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
}
out:
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
return ret;
}
@@ -754,7 +756,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
return ret;
}
- kvm_slots_lock(kml);
+ kvm_slots_lock();
for (i = 0; i < s->nr_slots; i++) {
mem = &kml->slots[i];
@@ -780,7 +782,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
}
}
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
return ret;
}
@@ -1085,7 +1087,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
(start_addr - section->offset_within_address_space);
- kvm_slots_lock(kml);
+ kvm_slots_lock();
if (!add) {
do {
@@ -1143,7 +1145,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
} while (size);
out:
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
}
static void kvm_region_add(MemoryListener *listener,
@@ -1170,9 +1172,9 @@ static void kvm_log_sync(MemoryListener *listener,
KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
- kvm_slots_lock(kml);
+ kvm_slots_lock();
r = kvm_physical_sync_dirty_bitmap(kml, section);
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
if (r < 0) {
abort();
}
@@ -1272,7 +1274,6 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
{
    int i;
-    qemu_mutex_init(&kml->slots_lock);
    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
    kml->as_id = as_id;
@@ -1960,6 +1961,8 @@ static int kvm_init(MachineState *ms)
    int type = 0;
    uint64_t dirty_log_manual_caps;
+    qemu_mutex_init(&kml_slots_lock);
+
    s = KVM_STATE(ms->accel);
@@ -27,8 +27,6 @@ typedef struct KVMSlot
typedef struct KVMMemoryListener {
MemoryListener listener;
- /* Protects the slots and all inside them */
- QemuMutex slots_lock;
KVMSlot *slots;
int as_id;
} KVMMemoryListener;
The per-KML slots_lock becomes a problem as soon as we need to take the slots_lock of every KML at once: if the calling context may already hold some of the KML slots_locks, we would also have to track which locks are already held and which still need to be taken. Make this simple by merging all the KML slots_locks into a single global slots lock.

The per-KML granularity was never buying us much anyway: so far only x86 has two address spaces (and therefore two slots_locks). Every other architecture has a single address space, hence a single slots_lock, so for those nothing changes.

Note that on x86 kvm_memory_listener_register() runs once per address space, so the global lock is initialized once in kvm_init() rather than in the register path, where it would be initialized twice.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 accel/kvm/kvm-all.c      | 33 ++++++++++++++++++++---------------
 include/sysemu/kvm_int.h |  2 --
 2 files changed, 18 insertions(+), 17 deletions(-)
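
To make the before/after concrete, here is a minimal standalone sketch of the pattern this patch adopts: one process-wide mutex guarding every listener's slot array. It uses plain pthreads and invented names (Listener, count_all_slots, and so on); it is an illustration of the locking scheme under those assumptions, not QEMU code.

/*
 * Illustrative sketch only -- not QEMU code.  One global mutex
 * protects the slots of every listener, mirroring kml_slots_lock.
 */
#include <pthread.h>
#include <stddef.h>

typedef struct Slot {
    unsigned long start;
    unsigned long size;
} Slot;

typedef struct Listener {
    Slot *slots;        /* protected by slots_lock below */
    int nr_slots;
} Listener;

/* Single big lock: every path touching any listener's slots takes it. */
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

#define slots_lock_acquire()  pthread_mutex_lock(&slots_lock)
#define slots_lock_release()  pthread_mutex_unlock(&slots_lock)

/*
 * Walking all listeners needs exactly one acquire.  With per-listener
 * locks this would instead require taking every lock in a fixed order,
 * plus bookkeeping for locks the caller might already hold.
 */
static int count_all_slots(Listener *listeners, int n)
{
    int i, total = 0;

    slots_lock_acquire();
    for (i = 0; i < n; i++) {
        total += listeners[i].nr_slots;
    }
    slots_lock_release();
    return total;
}

int main(void)
{
    Slot s0 = { 0, 4096 };
    Listener ls[2] = {
        { &s0, 1 },    /* e.g. the default address space */
        { NULL, 0 },   /* e.g. the second (x86 SMM) address space */
    };

    return count_all_slots(ls, 2) == 1 ? 0 : 1;
}

The point of the merge is visible in count_all_slots(): a cross-listener walk is a single acquire/release pair, which is exactly the situation the commit message describes as troublesome under per-KML locks.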