
[Xen-devel,3/4] xen/arm: introduce XENMEM_cache_flush

Message ID 1412244158-12124-3-git-send-email-stefano.stabellini@eu.citrix.com
State New

Commit Message

Stefano Stabellini Oct. 2, 2014, 10:02 a.m. UTC
Introduce a new hypercall to perform cache maintenance operations on
behalf of the guest. The argument specifies a machine address and a size.
The implementation checks that the memory range is owned by the guest or
that the guest has been granted access to it by another domain.
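
For illustration, a guest might invoke the new subop roughly as follows.
This is a minimal sketch, not part of the patch: it assumes a Linux-style
HYPERVISOR_memory_op wrapper, and flush_dma_buffer/maddr/size are
placeholder names for a caller that already knows the machine address of
the range.

    /* Placeholder helper: clean and invalidate the data cache for a
     * DMA buffer whose machine address and size are already known. */
    static int flush_dma_buffer(uint64_t maddr, uint64_t size)
    {
        struct xen_cache_flush cflush = {
            .addr = maddr,   /* machine address of the start of the range */
            .size = size,    /* length of the range in bytes */
            .op   = XENMEM_CACHE_CLEAN | XENMEM_CACHE_INVAL,
        };

        /* Returns 0 on success, -errno otherwise. */
        return HYPERVISOR_memory_op(XENMEM_cache_flush, &cflush);
    }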

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 xen/arch/arm/mm.c           |   92 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/memory.h |   17 ++++++++
 2 files changed, 109 insertions(+)

Comments

Julien Grall Oct. 2, 2014, 12:17 p.m. UTC | #1
Hi Stefano,

On 10/02/2014 11:02 AM, Stefano Stabellini wrote:
> Introduce a new hypercall to perform cache maintenance operations on
> behalf of the guest. The argument specifies a machine address and a size.
> The implementation checks that the memory range is owned by the guest or
> that the guest has been granted access to it by another domain.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> ---
>  xen/arch/arm/mm.c           |   92 +++++++++++++++++++++++++++++++++++++++++++
>  xen/include/public/memory.h |   17 ++++++++
>  2 files changed, 109 insertions(+)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index c5b48ef..f6139bd 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -1134,6 +1134,98 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
>      case XENMEM_get_sharing_shared_pages:
>      case XENMEM_get_sharing_freed_pages:
>          return 0;
> +    case XENMEM_cache_flush:
> +    {
> +        struct xen_cache_flush cflush;
> +        struct domain *d, *owner;
> +        struct page_info *page;
> +        uint64_t mfn, end;
> +        uint64_t offset, size;
> +        void *v;
> +        int ret = 0;
> +
> +        if ( copy_from_guest(&cflush, arg, 1) )
> +            return -EFAULT;
> +
> +        d = rcu_lock_current_domain();
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        if ( (cflush.size >> PAGE_SHIFT) > (1U<<MAX_ORDER) )
> +        {
> +            printk(XENLOG_G_ERR "invalid size %llx\n", cflush.size);

This hypercall only operates on the current domain, so I would replace
all of the printk calls with gdprintk.
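
For instance, the size check above would then print something like this
(sketch of the suggested change, keeping the original format string):

    gdprintk(XENLOG_ERR, "invalid size %llx\n", cflush.size);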

> +            ret = -EINVAL;
> +            goto out;
> +        }
> +
> +        if ( cflush.size == 0 || cflush.op == 0 )
> +        {
> +            ret = 0;
> +            goto out;
> +        }
> +
> +        if ( cflush.op & ~(XENMEM_CACHE_INVAL|XENMEM_CACHE_CLEAN) )
> +        {
> +            printk(XENLOG_G_ERR "invalid op %x\n", cflush.op);
> +            ret = -EINVAL;
> +            goto out;
> +        }
> +
> +        end = cflush.addr + cflush.size;
> +        while ( cflush.addr < end )
> +        {
> +            mfn = cflush.addr >> PAGE_SHIFT;
> +            offset = cflush.addr & ~PAGE_MASK;
> +
> +            if ( !mfn_valid(mfn) )
> +            {
> +                printk(XENLOG_G_ERR "mfn=%llx is not valid\n", mfn);
> +                ret = -EINVAL;
> +                goto out;
> +            }
> +
> +            page = mfn_to_page(mfn);
> +            if ( !page )
> +            {
> +                printk(XENLOG_G_ERR "couldn't get page for mfn %llx\n", mfn);
> +                ret =  -EFAULT;
> +                goto out;
> +            }

This check is not necessary as mfn_to_page will never return NULL.
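
i.e. (sketch of the simplification) once mfn_valid() has passed, the
lookup can just be:

    page = mfn_to_page(mfn);
    owner = page_get_owner(page);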

> +
> +            owner = page_get_owner(page);

Don't you need to take a reference on the page?

The foreign guest may decide to drop the mapping while Xen is using the
page for the cache maintenance. I think this could invalidate the owner
pointer (for instance when a guest is crashing).

I'm also wondering if we need to take a reference on the foreign domain...
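
For illustration only (not what the patch currently does), one possible
shape, assuming Xen's get_page()/put_page() helpers and ignoring the
window between reading the owner and taking the reference, would be
roughly:

    /* Sketch: pin the page so it cannot disappear during the flush. */
    page = mfn_to_page(mfn);
    owner = page_get_owner(page);
    if ( !owner || !get_page(page, owner) )
    {
        ret = -EINVAL;
        goto out;
    }

    /* ... map the page and perform the cache maintenance ... */

    put_page(page);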

[..]

> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
> index db961ec..4a6641d 100644
> --- a/xen/include/public/memory.h
> +++ b/xen/include/public/memory.h
> @@ -571,6 +571,23 @@ DEFINE_XEN_GUEST_HANDLE(vnuma_topology_info_t);
>   */
>  #define XENMEM_get_vnumainfo               26
>  
> +/*
> + * Issue one or more cache maintenance operations on a memory range
> + * owned by the calling domain or granted to the calling domain by a
> + * foreign domain.
> + */
> +#define XENMEM_cache_flush                 27
> +struct xen_cache_flush {
> +    /* addr is the machine address at the start of the memory range */
> +    uint64_t addr;
> +    uint64_t size;
> +#define XENMEM_CACHE_CLEAN      (1<<0)
> +#define XENMEM_CACHE_INVAL      (1<<1)
> +    uint32_t op;
> +};
> +typedef struct xen_cache_flush xen_cache_flush_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_cache_flush_t);
> +
>  #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>  
>  /* Next available subop number is 27 */

You forgot to update the comment.
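
i.e. the trailing comment in memory.h presumably needs to become:

    /* Next available subop number is 28 */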

Regards,
Ian Campbell Oct. 3, 2014, 1:41 p.m. UTC | #2
On Thu, 2014-10-02 at 11:02 +0100, Stefano Stabellini wrote:
> +            if ( owner != d && !grant_map_exists(d, owner->grant_table, mfn) )
> +            {
> +                printk(XENLOG_G_ERR "mfn %llx hasn't been granted by %d to %d\n", mfn, owner->domain_id, d->domain_id);
> +                ret = -EINVAL;
> +                goto out;
> +            }

I know you are rewriting this as a gnttab op but this made me think to
mention: be sure to consider the case of a loopback grant, i.e.
granting something to yourself. We do occasionally have call to
loopback-attach e.g. vbd devices to dom0.

Ian.

Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index c5b48ef..f6139bd 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1134,6 +1134,98 @@  long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
     case XENMEM_get_sharing_shared_pages:
     case XENMEM_get_sharing_freed_pages:
         return 0;
+    case XENMEM_cache_flush:
+    {
+        struct xen_cache_flush cflush;
+        struct domain *d, *owner;
+        struct page_info *page;
+        uint64_t mfn, end;
+        uint64_t offset, size;
+        void *v;
+        int ret = 0;
+
+        if ( copy_from_guest(&cflush, arg, 1) )
+            return -EFAULT;
+
+        d = rcu_lock_current_domain();
+        if ( d == NULL )
+            return -ESRCH;
+
+        if ( (cflush.size >> PAGE_SHIFT) > (1U<<MAX_ORDER) )
+        {
+            printk(XENLOG_G_ERR "invalid size %llx\n", cflush.size);
+            ret = -EINVAL;
+            goto out;
+        }
+
+        if ( cflush.size == 0 || cflush.op == 0 )
+        {
+            ret = 0;
+            goto out;
+        }
+
+        if ( cflush.op & ~(XENMEM_CACHE_INVAL|XENMEM_CACHE_CLEAN) )
+        {
+            printk(XENLOG_G_ERR "invalid op %x\n", cflush.op);
+            ret = -EINVAL;
+            goto out;
+        }
+
+        end = cflush.addr + cflush.size;
+        while ( cflush.addr < end )
+        {
+            mfn = cflush.addr >> PAGE_SHIFT;
+            offset = cflush.addr & ~PAGE_MASK;
+
+            if ( !mfn_valid(mfn) )
+            {
+                printk(XENLOG_G_ERR "mfn=%llx is not valid\n", mfn);
+                ret = -EINVAL;
+                goto out;
+            }
+
+            page = mfn_to_page(mfn);
+            if ( !page )
+            {
+                printk(XENLOG_G_ERR "couldn't get page for mfn %llx\n", mfn);
+                ret =  -EFAULT;
+                goto out;
+            }
+
+            owner = page_get_owner(page);
+            if ( !owner )
+            {
+                printk(XENLOG_G_ERR "couldn't get owner for mfn %llx\n", mfn);
+                ret = -EFAULT;
+                goto out;
+            }
+
+            if ( owner != d && !grant_map_exists(d, owner->grant_table, mfn) )
+            {
+                printk(XENLOG_G_ERR "mfn %llx hasn't been granted by %d to %d\n", mfn, owner->domain_id, d->domain_id);
+                ret = -EINVAL;
+                goto out;
+            }
+
+            v = map_domain_page(mfn);
+            v += offset;
+            size = cflush.size - cflush.addr;
+            if ( size > PAGE_SIZE - offset )
+                size = PAGE_SIZE - offset;
+
+            if ( cflush.op & XENMEM_CACHE_INVAL )
+                invalidate_xen_dcache_va_range(v, size);
+            if ( cflush.op & XENMEM_CACHE_CLEAN )
+                clean_xen_dcache_va_range(v, size);
+            unmap_domain_page(v);
+
+            cflush.addr += PAGE_SIZE - offset;
+        }
+
+out:
+        rcu_unlock_domain(d);
+        return ret;
+    }
 
     default:
         return -ENOSYS;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index db961ec..4a6641d 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -571,6 +571,23 @@  DEFINE_XEN_GUEST_HANDLE(vnuma_topology_info_t);
  */
 #define XENMEM_get_vnumainfo               26
 
+/*
+ * Issue one or more cache maintenance operations on a memory range
+ * owned by the calling domain or granted to the calling domain by a
+ * foreign domain.
+ */
+#define XENMEM_cache_flush                 27
+struct xen_cache_flush {
+    /* addr is the machine address at the start of the memory range */
+    uint64_t addr;
+    uint64_t size;
+#define XENMEM_CACHE_CLEAN      (1<<0)
+#define XENMEM_CACHE_INVAL      (1<<1)
+    uint32_t op;
+};
+typedef struct xen_cache_flush xen_cache_flush_t;
+DEFINE_XEN_GUEST_HANDLE(xen_cache_flush_t);
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 /* Next available subop number is 27 */