
[v9,07/20] multi-process: define transmission functions in remote

Message ID 20200827181231.22778-8-elena.ufimtseva@oracle.com
State New
Series [v9,01/20] memory: alloc RAM from file at offset

Commit Message

Elena Ufimtseva Aug. 27, 2020, 6:12 p.m. UTC
From: Elena Ufimtseva <elena.ufimtseva@oracle.com>

The remote process uses the QEMU event loop and cannot block while
communicating with the proxy object. This patch defines the coroutines
used to receive messages from, and send messages to, the proxy from the
remote process.

TODO: Avoid the aio_poll() by entering the coroutines from a higher
level.

Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
---
 include/io/mpqemu-link.h |  12 +++++
 io/mpqemu-link.c         | 108 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 120 insertions(+)

Comments

Stefan Hajnoczi Sept. 23, 2020, 2:02 p.m. UTC | #1
On Thu, Aug 27, 2020 at 11:12:18AM -0700, elena.ufimtseva@oracle.com wrote:
> TODO: Avoid the aio_poll() by entering the coroutines from a higher
> level.

The monitor is unresponsive during the aio_poll() loop. Is this a
blocker for you?

Running all mpqemu communication in a coroutine as mentioned in this
TODO is a cleaner solution. Then this patch will be unnecessary.
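
One way to read this suggestion (a hypothetical sketch, not the eventual
v10 implementation): if the request is already handled from coroutine
context, the blocking-style helpers can be called directly, because the
qio_channel_*_all() helpers yield back to the event loop when running in
a coroutine instead of blocking, so no aio_poll() busy-wait is needed.
The function name below is made up for illustration:

    /* Hypothetical: run the whole exchange from coroutine context. */
    static void coroutine_fn mpqemu_process_msg_co(void *opaque)
    {
        MPQemuRequest *req = opaque;
        Error *local_err = NULL;

        /* Yields via the QIOChannel helpers rather than blocking. */
        mpqemu_msg_send(req->msg, req->ioc, &local_err);
        if (local_err) {
            error_report_err(local_err);   /* reports and frees the Error */
        }
    }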

> +static void coroutine_fn mpqemu_msg_send_co(void *data)
> +{
> +    MPQemuRequest *req = (MPQemuRequest *)data;
> +    Error *local_err = NULL;
> +
> +    mpqemu_msg_send(req->msg, req->ioc, &local_err);
> +    if (local_err) {
> +        error_report("ERROR: failed to send command to remote %d, ",
> +                     req->msg->cmd);
> +        req->finished = true;
> +        req->error = -EINVAL;
> +        return;

local_err is leaked.
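
One way the leak could be closed in this hunk (a sketch only;
error_report_err() from qapi/error.h reports the message and then frees
the Error object):

    mpqemu_msg_send(req->msg, req->ioc, &local_err);
    if (local_err) {
        /* Report and free local_err so it is not leaked. */
        error_report_err(local_err);
        req->error = -EINVAL;
        req->finished = true;
        return;
    }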

> +    }
> +
> +    req->finished = true;
> +}
> +
> +void mpqemu_msg_send_in_co(MPQemuRequest *req, QIOChannel *ioc,
> +                                  Error **errp)
> +{
> +    Coroutine *co;
> +
> +    if (!req->ioc) {
> +        if (errp) {
> +            error_setg(errp, "Channel is set to NULL");
> +        } else {
> +            error_report("Channel is set to NULL");
> +        }

The caller should provide an errp if they are interested in the error
message. Duplicating error messages is messy.
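
A sketch of the simpler shape this suggests: error_setg() discards the
error when errp is NULL, so the error_report() fallback in the callee
can be dropped entirely.

    if (!req->ioc) {
        /* Set the error once; the caller decides whether to report it. */
        error_setg(errp, "Channel is set to NULL");
        return;
    }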

> +static void coroutine_fn mpqemu_msg_recv_co(void *data)
> +{
> +    MPQemuRequest *req = (MPQemuRequest *)data;
> +    Error *local_err = NULL;
> +
> +    mpqemu_msg_recv(req->msg, req->ioc, &local_err);
> +    if (local_err) {
> +        error_report("ERROR: failed to send command to remote %d, ",
> +                     req->msg->cmd);
> +        req->finished = true;
> +        req->error = -EINVAL;
> +        return;

local_err is leaked.

Elena Ufimtseva Sept. 24, 2020, 5:18 p.m. UTC | #2
On Wed, Sep 23, 2020 at 03:02:46PM +0100, Stefan Hajnoczi wrote:
> On Thu, Aug 27, 2020 at 11:12:18AM -0700, elena.ufimtseva@oracle.com wrote:
> > TODO: Avoid the aio_poll() by entering the coroutines from a higher
> > level.
>
> The monitor is unresponsive during the aio_poll() loop. Is this a
> blocker for you?
>

Hi Stefan,
No, it is not a blocker; removal of aio_poll() had to be left for the
next round.

> Running all mpqemu communication in a coroutine as mentioned in this
> TODO is a cleaner solution. Then this patch will be unnecessary.
>

Yes, thank you, it will go away in v10.

> > +static void coroutine_fn mpqemu_msg_send_co(void *data)
> > +{
> > +    MPQemuRequest *req = (MPQemuRequest *)data;
> > +    Error *local_err = NULL;
> > +
> > +    mpqemu_msg_send(req->msg, req->ioc, &local_err);
> > +    if (local_err) {
> > +        error_report("ERROR: failed to send command to remote %d, ",
> > +                     req->msg->cmd);
> > +        req->finished = true;
> > +        req->error = -EINVAL;
> > +        return;
>
> local_err is leaked.
>
> > +    }
> > +
> > +    req->finished = true;
> > +}
> > +
> > +void mpqemu_msg_send_in_co(MPQemuRequest *req, QIOChannel *ioc,
> > +                                  Error **errp)
> > +{
> > +    Coroutine *co;
> > +
> > +    if (!req->ioc) {
> > +        if (errp) {
> > +            error_setg(errp, "Channel is set to NULL");
> > +        } else {
> > +            error_report("Channel is set to NULL");
> > +        }
>
> The caller should provide an errp if they are interested in the error
> message. Duplicating error messages is messy.
>
> > +static void coroutine_fn mpqemu_msg_recv_co(void *data)
> > +{
> > +    MPQemuRequest *req = (MPQemuRequest *)data;
> > +    Error *local_err = NULL;
> > +
> > +    mpqemu_msg_recv(req->msg, req->ioc, &local_err);
> > +    if (local_err) {
> > +        error_report("ERROR: failed to send command to remote %d, ",
> > +                     req->msg->cmd);
> > +        req->finished = true;
> > +        req->error = -EINVAL;
> > +        return;
>
> local_err is leaked.

Thank you for reviewing these; I will fix them if the code above is re-used.

Elena
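
For context, a caller in the remote process might drive the helpers
added by this patch roughly as follows. This is a hypothetical sketch:
"cmd" and "ioc" are placeholders, and only the MPQemuRequest and
MPQemuMsg fields visible in the patch are used.

    MPQemuMsg msg = {0};
    MPQemuRequest req = {0};
    Error *local_err = NULL;

    msg.cmd = cmd;                /* placeholder: some MPQemuCmd value */
    msg.num_fds = 0;
    req.msg = &msg;
    req.ioc = ioc;                /* placeholder: channel to the proxy */

    mpqemu_msg_send_in_co(&req, req.ioc, &local_err);
    if (local_err) {
        error_report_err(local_err);
    } else if (req.error) {
        error_report("Failed to send message to proxy: %d", req.error);
    }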

Patch

diff --git a/include/io/mpqemu-link.h b/include/io/mpqemu-link.h
index c7de8648bc..e02b5ce663 100644
--- a/include/io/mpqemu-link.h
+++ b/include/io/mpqemu-link.h
@@ -52,6 +52,18 @@  typedef struct {
     int num_fds;
 } MPQemuMsg;
 
+struct MPQemuRequest {
+    MPQemuMsg *msg;
+    QIOChannel *ioc;
+    bool finished;
+    int error;
+};
+
+typedef struct MPQemuRequest MPQemuRequest;
+
+void mpqemu_msg_send_in_co(MPQemuRequest *req, QIOChannel *ioc, Error **errp);
+void mpqemu_msg_recv_in_co(MPQemuRequest *req, QIOChannel *ioc, Error **errp);
+
 void mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
 void mpqemu_msg_recv(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
 
diff --git a/io/mpqemu-link.c b/io/mpqemu-link.c
index d9be2bdeab..1dd776f81b 100644
--- a/io/mpqemu-link.c
+++ b/io/mpqemu-link.c
@@ -150,6 +150,114 @@  fail:
     }
 }
 
+static void coroutine_fn mpqemu_msg_send_co(void *data)
+{
+    MPQemuRequest *req = (MPQemuRequest *)data;
+    Error *local_err = NULL;
+
+    mpqemu_msg_send(req->msg, req->ioc, &local_err);
+    if (local_err) {
+        error_report("ERROR: failed to send command to remote %d, ",
+                     req->msg->cmd);
+        req->finished = true;
+        req->error = -EINVAL;
+        return;
+    }
+
+    req->finished = true;
+}
+
+void mpqemu_msg_send_in_co(MPQemuRequest *req, QIOChannel *ioc,
+                                  Error **errp)
+{
+    Coroutine *co;
+
+    if (!req->ioc) {
+        if (errp) {
+            error_setg(errp, "Channel is set to NULL");
+        } else {
+            error_report("Channel is set to NULL");
+        }
+        return;
+    }
+
+    req->error = 0;
+    req->finished = false;
+
+    co = qemu_coroutine_create(mpqemu_msg_send_co, req);
+    qemu_coroutine_enter(co);
+
+    while (!req->finished) {
+        aio_poll(qemu_get_aio_context(), true);
+    }
+
+    if (req->error) {
+        if (errp) {
+            error_setg(errp, "Error sending message to proxy, "
+                             "error %d", req->error);
+        } else {
+            error_report("Error sending message to proxy, "
+                         "error %d", req->error);
+        }
+    }
+
+    return;
+}
+
+static void coroutine_fn mpqemu_msg_recv_co(void *data)
+{
+    MPQemuRequest *req = (MPQemuRequest *)data;
+    Error *local_err = NULL;
+
+    mpqemu_msg_recv(req->msg, req->ioc, &local_err);
+    if (local_err) {
+        error_report("ERROR: failed to send command to remote %d, ",
+                     req->msg->cmd);
+        req->finished = true;
+        req->error = -EINVAL;
+        return;
+    }
+
+    req->finished = true;
+}
+
+void mpqemu_msg_recv_in_co(MPQemuRequest *req, QIOChannel *ioc,
+                               Error **errp)
+{
+    Coroutine *co;
+
+    if (!req->ioc) {
+        if (errp) {
+            error_setg(errp, "Channel is set to NULL");
+        } else {
+            error_report("Channel is set to NULL");
+        }
+        return;
+    }
+
+    req->error = 0;
+    req->finished = false;
+
+    co = qemu_coroutine_create(mpqemu_msg_recv_co, req);
+    qemu_coroutine_enter(co);
+
+    while (!req->finished) {
+        aio_poll(qemu_get_aio_context(), true);
+    }
+
+    if (req->error) {
+        if (errp) {
+            error_setg(errp, "Error sending message to proxy, "
+                             "error %d", req->error);
+        } else {
+            error_report("Error sending message to proxy, "
+                         "error %d", req->error);
+        }
+    }
+
+    return;
+}
+
 bool mpqemu_msg_valid(MPQemuMsg *msg)
 {
     if (msg->cmd >= MAX && msg->cmd < 0) {