Message ID | 20201001182641.80232-5-peterx@redhat.com |
---|---|
State | Superseded |
Series | migration/postcopy: Sync faulted addresses after network recovered |
* Peter Xu (peterx@redhat.com) wrote:
> We synchronize the requested pages right after a postcopy recovery happens.
> This helps to synchronize the prioritized pages on source so that the faulted
> threads can be served faster.
> 
> Reported-by: Xiaohui Li <xiaohli@redhat.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  migration/savevm.c     | 57 ++++++++++++++++++++++++++++++++++++++++++
>  migration/trace-events |  1 +
>  2 files changed, 58 insertions(+)

[...]

> +    ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset);
> +    if (ret) {
> +        /* Please refer to above comment. */
> +        error_report("%s: send rp message failed for addr %p",
> +                     __func__, host_addr);
> +        return FALSE;
> +    }
> +
> +    trace_postcopy_page_req_sync((uint64_t)(uintptr_t)host_addr);

Again that's a case for host_addr and a %p I think.

Dave

[...]
On Fri, Oct 02, 2020 at 06:26:46PM +0100, Dr. David Alan Gilbert wrote:
> > +    trace_postcopy_page_req_sync((uint64_t)(uintptr_t)host_addr);
> 
> Again that's a case for host_addr and a %p I think.

Yeah, I'll fix both places and repost.  Thanks.

-- 
Peter Xu
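For reference, the cleanup being agreed on here would look roughly like the following. This is only a sketch of the likely respin, which is not part of this thread; QEMU trace-events accept pointer arguments formatted with %p, so the cast at the call site can simply be dropped.

In migration/savevm.c:

-    trace_postcopy_page_req_sync((uint64_t)(uintptr_t)host_addr);
+    trace_postcopy_page_req_sync(host_addr);

In migration/trace-events:

-postcopy_page_req_sync(uint64_t host_addr) "sync page req 0x%"PRIx64
+postcopy_page_req_sync(void *host_addr) "sync page req %p"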
diff --git a/migration/savevm.c b/migration/savevm.c
index 34e4b71052..56a2bfb24c 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2011,6 +2011,49 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
     return LOADVM_QUIT;
 }
 
+/* We must be with page_request_mutex held */
+static gboolean postcopy_sync_page_req(gpointer key, gpointer value,
+                                       gpointer data)
+{
+    MigrationIncomingState *mis = data;
+    void *host_addr = (void *) key;
+    ram_addr_t rb_offset;
+    RAMBlock *rb;
+    int ret;
+
+    rb = qemu_ram_block_from_host(host_addr, true, &rb_offset);
+    if (!rb) {
+        /*
+         * This should _never_ happen.  However be nice for a migrating VM to
+         * not crash/assert.  Post an error (note: intended to not use *_once
+         * because we do want to see all the illegal addresses; and this can
+         * never be triggered by the guest so we're safe) and move on next.
+         */
+        error_report("%s: illegal host addr %p", __func__, host_addr);
+        /* Try the next entry */
+        return FALSE;
+    }
+
+    ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset);
+    if (ret) {
+        /* Please refer to above comment. */
+        error_report("%s: send rp message failed for addr %p",
+                     __func__, host_addr);
+        return FALSE;
+    }
+
+    trace_postcopy_page_req_sync((uint64_t)(uintptr_t)host_addr);
+
+    return FALSE;
+}
+
+static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis)
+{
+    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
+        g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis);
+    }
+}
+
 static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
 {
     if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
@@ -2033,6 +2076,20 @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
     /* Tell source that "we are ready" */
     migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE);
 
+    /*
+     * After a postcopy recovery, the source should have lost the postcopy
+     * queue, or potentially the requested pages could have been lost during
+     * the network down phase.  Let's re-sync with the source VM by re-sending
+     * all the pending pages that we eagerly need, so these threads won't get
+     * blocked too long due to the recovery.
+     *
+     * Without this procedure, the faulted destination VM threads (waiting for
+     * page requests right before the postcopy is interrupted) can keep hanging
+     * until the pages are sent by the source during the background copying of
+     * pages, or another thread faulted on the same address accidentally.
+     */
+    migrate_send_rp_req_pages_pending(mis);
+
     return 0;
 }
 
diff --git a/migration/trace-events b/migration/trace-events
index 9187b03725..5d0b0662a8 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -49,6 +49,7 @@ vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s"
 vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s"
 postcopy_pause_incoming(void) ""
 postcopy_pause_incoming_continued(void) ""
+postcopy_page_req_sync(uint64_t host_addr) "sync page req 0x%"PRIx64
 
 # vmstate.c
 vmstate_load_field_error(const char *field, int ret) "field \"%s\" load failed, ret = %d"
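For readers less familiar with GLib's GTree API, below is a stripped-down, self-contained sketch of the traversal pattern that postcopy_sync_page_req() builds on: faulted host addresses are kept in a tree keyed by address, and after the channel recovers each one is walked and re-requested. Every name here (pending_page_reqs, resend_page_request, ...) is a hypothetical stand-in for illustration, not a QEMU interface, and the build line assumes glib development headers are installed.

/*
 * Sketch only: remember "faulted" addresses in a GTree, then walk them all
 * after a (pretend) recovery and re-issue one request per address.
 *
 * Build: gcc resync_sketch.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdio.h>

/* Order keys (host addresses) by their numeric value. */
static gint addr_cmp(gconstpointer a, gconstpointer b)
{
    gsize ua = GPOINTER_TO_SIZE(a);
    gsize ub = GPOINTER_TO_SIZE(b);

    return ua < ub ? -1 : (ua > ub ? 1 : 0);
}

/* Stand-in for migrate_send_rp_message_req_pages(): re-send one request. */
static void resend_page_request(gpointer host_addr)
{
    printf("re-requesting page at %p\n", host_addr);
}

/* GTraverseFunc: returning FALSE keeps g_tree_foreach() iterating. */
static gboolean resync_one(gpointer key, gpointer value, gpointer data)
{
    resend_page_request(key);
    return FALSE;
}

int main(void)
{
    /* Key: faulted host address; value: unused marker, as in page_requested. */
    GTree *pending_page_reqs = g_tree_new(addr_cmp);

    /* Pretend three destination threads faulted on these host addresses. */
    g_tree_insert(pending_page_reqs, GSIZE_TO_POINTER(0x7f0000001000), GINT_TO_POINTER(1));
    g_tree_insert(pending_page_reqs, GSIZE_TO_POINTER(0x7f0000203000), GINT_TO_POINTER(1));
    g_tree_insert(pending_page_reqs, GSIZE_TO_POINTER(0x7f0000042000), GINT_TO_POINTER(1));

    /* After "recovery": walk everything still pending and re-request it. */
    g_tree_foreach(pending_page_reqs, resync_one, NULL);

    g_tree_destroy(pending_page_reqs);
    return 0;
}

Note the return value of the traversal callback: g_tree_foreach() stops as soon as the callback returns TRUE, which is why the patch returns FALSE even on its error paths, so that one bad entry does not prevent the remaining pending pages from being re-requested.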
We synchronize the requested pages right after a postcopy recovery happens.
This helps to synchronize the prioritized pages on source so that the faulted
threads can be served faster.

Reported-by: Xiaohui Li <xiaohli@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 migration/savevm.c     | 57 ++++++++++++++++++++++++++++++++++++++++++
 migration/trace-events |  1 +
 2 files changed, 58 insertions(+)