@@ -886,6 +886,26 @@ int xc_vcpu_getcontext(xc_interface *xch,
vcpu_guest_context_any_t *ctxt);
/**
+ * This function initializes the vuart emulation and returns
+ * the event channel to be used by the backend for communicating
+ * with the emulation code.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm type type of vuart (e.g. XEN_DOMCTL_VUART_TYPE_VPL011)
+ * @parm domid the domain for which the vuart is initialized
+ * @parm console_domid the domid of the backend console
+ * @parm gfn the guest frame to be used as the ring buffer
+ * @parm evtchn out parameter for the event channel port to be used
+ *              for ring buffer events
+ * @return 0 on success, negative error on failure
+ */
+int xc_dom_vuart_init(xc_interface *xch,
+ uint32_t type,
+ domid_t domid,
+ domid_t console_domid,
+ xen_pfn_t gfn,
+ evtchn_port_t *evtchn);
+
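As a rough usage sketch (not part of the patch), a toolstack caller might drive the new libxc interface as follows; the helper name and the way the domids and ring gfn are obtained are placeholders:

int setup_vuart(xc_interface *xch, domid_t guest_domid,
                domid_t console_domid, xen_pfn_t ring_gfn)
{
    evtchn_port_t port;
    int rc;

    /* Ask Xen to initialize the vpl011 emulation for the guest. */
    rc = xc_dom_vuart_init(xch, XEN_DOMCTL_VUART_TYPE_VPL011,
                           guest_domid, console_domid, ring_gfn, &port);
    if ( rc < 0 )
        return rc;

    /* 'port' is the event channel the console backend should bind to. */
    return 0;
}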
+/**
* This function returns information about the XSAVE state of a particular
* vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
* the call is considered a query to retrieve them and the buffer is not
@@ -343,6 +343,33 @@ int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
return 0;
}
+int xc_dom_vuart_init(xc_interface *xch,
+ uint32_t type,
+ domid_t domid,
+ domid_t console_domid,
+ xen_pfn_t gfn,
+ evtchn_port_t *evtchn)
+{
+ DECLARE_DOMCTL;
+ int rc = 0;
+
+ memset(&domctl, 0, sizeof(domctl));
+
+ domctl.cmd = XEN_DOMCTL_vuart_op;
+ domctl.domain = domid;
+ domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_INIT;
+ domctl.u.vuart_op.type = type;
+ domctl.u.vuart_op.console_domid = console_domid;
+ domctl.u.vuart_op.gfn = gfn;
+
+ if ( (rc = do_domctl(xch, &domctl)) < 0 )
+ return rc;
+
+ *evtchn = domctl.u.vuart_op.evtchn;
+
+ return rc;
+}
+
int xc_domain_getinfo(xc_interface *xch,
uint32_t first_domid,
unsigned int max_doms,
@@ -44,6 +44,13 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
libxl_domain_build_info *info,
struct xc_dom_image *dom);
+/* perform any pending hardware initialization */
+_hidden
+int libxl__arch_build_dom_finish(libxl__gc *gc,
+ libxl_domain_build_info *info,
+ struct xc_dom_image *dom,
+ libxl__domain_build_state *state);
+
/* build vNUMA vmemrange with arch specific information */
_hidden
int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
@@ -1038,6 +1038,33 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
return 0;
}
+int libxl__arch_build_dom_finish(libxl__gc *gc,
+ libxl_domain_build_info *info,
+ struct xc_dom_image *dom,
+ libxl__domain_build_state *state)
+{
+ int rc = 0, ret;
+
+ if (info->arch_arm.vuart != LIBXL_VUART_TYPE_SBSA_UART) {
+ rc = 0;
+ goto out;
+ }
+
+ ret = xc_dom_vuart_init(CTX->xch,
+ XEN_DOMCTL_VUART_TYPE_VPL011,
+ dom->guest_domid,
+ dom->console_domid,
+ dom->vuart_gfn,
+ &state->vuart_port);
+ if (ret < 0) {
+ rc = ERROR_FAIL;
+        LOG(ERROR, "xc_dom_vuart_init failed");
+ }
+
+out:
+ return rc;
+}
+
int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
uint32_t domid,
libxl_domain_build_info *info,
@@ -702,6 +702,10 @@ static int libxl__build_dom(libxl__gc *gc, uint32_t domid,
LOGE(ERROR, "xc_dom_gnttab_init failed");
goto out;
}
+ if ((ret = libxl__arch_build_dom_finish(gc, info, dom, state)) != 0) {
+ LOGE(ERROR, "libxl__arch_build_dom_finish failed");
+ goto out;
+ }
out:
return ret != 0 ? ERROR_FAIL : 0;
@@ -391,6 +391,14 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
return rc;
}
+int libxl__arch_build_dom_finish(libxl__gc *gc,
+ libxl_domain_build_info *info,
+ struct xc_dom_image *dom,
+ libxl__domain_build_state *state)
+{
+ return 0;
+}
+
/* Return 0 on success, ERROR_* on failure. */
int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
uint32_t domid,
@@ -874,6 +874,12 @@ int domain_relinquish_resources(struct domain *d)
if ( ret )
return ret;
+ /*
+     * Release the resources allocated for vpl011 via the
+     * XEN_DOMCTL_vuart_op domctl call.
+ */
+ domain_vpl011_deinit(d);
+
d->arch.relmem = RELMEM_xen;
/* Fallthrough */
@@ -5,9 +5,11 @@
*/
#include <xen/errno.h>
+#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/iocap.h>
#include <xen/lib.h>
+#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/types.h>
#include <xsm/xsm.h>
@@ -20,6 +22,29 @@ void arch_get_domain_info(const struct domain *d,
info->flags |= XEN_DOMINF_hap;
}
+static int handle_vuart_init(struct domain *d,
+ struct xen_domctl_vuart_op *vuart_op)
+{
+ int rc;
+ struct vpl011_init_info info;
+
+ info.console_domid = vuart_op->console_domid;
+ info.gfn = _gfn(vuart_op->gfn);
+
+ if ( d->creation_finished )
+ return -EPERM;
+
+ if ( vuart_op->type != XEN_DOMCTL_VUART_TYPE_VPL011 )
+ return -EOPNOTSUPP;
+
+ rc = domain_vpl011_init(d, &info);
+
+ if ( !rc )
+ vuart_op->evtchn = info.evtchn;
+
+ return rc;
+}
+
long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
@@ -119,6 +144,33 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
d->disable_migrate = domctl->u.disable_migrate.disable;
return 0;
+ case XEN_DOMCTL_vuart_op:
+ {
+ int rc;
+ unsigned int i;
+ struct xen_domctl_vuart_op *vuart_op = &domctl->u.vuart_op;
+
+        /* Check that the structure padding is zero. */
+ for ( i = 0; i < sizeof(vuart_op->pad); i++ )
+ if ( vuart_op->pad[i] )
+ return -EINVAL;
+
+        switch ( vuart_op->cmd )
+ {
+ case XEN_DOMCTL_VUART_OP_INIT:
+ rc = handle_vuart_init(d, vuart_op);
+ break;
+
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+        if ( !rc && copy_to_guest(u_domctl, domctl, 1) )
+            rc = -EFAULT;
+
+ return rc;
+ }
default:
{
int rc;
@@ -33,6 +33,7 @@
#endif
#include "xen.h"
+#include "event_channel.h"
#include "grant_table.h"
#include "hvm/save.h"
#include "memory.h"
@@ -1077,6 +1078,27 @@ struct xen_domctl_set_gnttab_limits {
uint32_t maptrack_frames; /* IN */
};
+/* XEN_DOMCTL_vuart_op */
+struct xen_domctl_vuart_op {
+#define XEN_DOMCTL_VUART_OP_INIT 0
+ uint32_t cmd; /* XEN_DOMCTL_VUART_OP_* */
+#define XEN_DOMCTL_VUART_TYPE_VPL011 0
+    uint32_t type;          /* IN - type of vuart.
+                             * Currently only vpl011 is supported.
+                             */
+ uint64_aligned_t gfn; /* IN - guest gfn to be used as a
+ * ring buffer.
+ */
+ domid_t console_domid; /* IN - domid of domain running the
+ * backend console.
+ */
+ uint8_t pad[2];
+ evtchn_port_t evtchn; /* OUT - remote port of the event
+ * channel used for sending
+ * ring buffer events.
+ */
+};
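The explicit pad[] keeps the structure layout identical across 32-bit and 64-bit callers: gfn is 8-byte aligned, console_domid occupies 2 bytes, and the padding brings evtchn back to a 4-byte boundary. A rough compile-time sanity sketch (assuming Xen's BUILD_BUG_ON and offsetof are available in the translation unit; the offsets below follow from the field list above):

/* Sketch only: verify the intended ABI layout of xen_domctl_vuart_op. */
static inline void vuart_op_layout_check(void)
{
    BUILD_BUG_ON(offsetof(struct xen_domctl_vuart_op, gfn) != 8);
    BUILD_BUG_ON(offsetof(struct xen_domctl_vuart_op, console_domid) != 16);
    BUILD_BUG_ON(offsetof(struct xen_domctl_vuart_op, evtchn) != 20);
    BUILD_BUG_ON(sizeof(struct xen_domctl_vuart_op) != 24);
}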
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -1155,6 +1177,7 @@ struct xen_domctl {
#define XEN_DOMCTL_psr_cat_op 78
#define XEN_DOMCTL_soft_reset 79
#define XEN_DOMCTL_set_gnttab_limits 80
+#define XEN_DOMCTL_vuart_op 81
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -1218,6 +1241,7 @@ struct xen_domctl {
struct xen_domctl_monitor_op monitor_op;
struct xen_domctl_psr_cat_op psr_cat_op;
struct xen_domctl_set_gnttab_limits set_gnttab_limits;
+ struct xen_domctl_vuart_op vuart_op;
uint8_t pad[128];
} u;
};