
[v3,14/29] riscv/mm: Implement map_shadow_stack() syscall

Message ID 20240403234054.2020347-15-debug@rivosinc.com
State Superseded
Series riscv control-flow integrity for usermode

Commit Message

Deepak Gupta April 3, 2024, 11:35 p.m. UTC
As discussed extensively in the changelog for the addition of this
syscall on x86 ("x86/shstk: Introduce map_shadow_stack syscall") the
existing mmap() and madvise() syscalls do not map entirely well onto the
security requirements for shadow stack memory since they lead to windows
where memory is allocated but not yet protected or stacks which are not
properly and safely initialised. Instead a new syscall map_shadow_stack()
has been defined which allocates and initialises a shadow stack page.

This patch implements this syscall for riscv. riscv doesn't require the
token to be set up by the kernel because user mode can do that by itself.
However, to provide compatibility and portability with other architectures,
user mode can specify the token set flag.

Signed-off-by: Deepak Gupta <debug@rivosinc.com>
---
 arch/riscv/kernel/Makefile      |   2 +
 arch/riscv/kernel/usercfi.c     | 149 ++++++++++++++++++++++++++++++++
 include/uapi/asm-generic/mman.h |   1 +
 3 files changed, 152 insertions(+)
 create mode 100644 arch/riscv/kernel/usercfi.c
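
For illustration, a minimal user-space invocation of the new syscall might look like the
sketch below. It is not part of this series: the __NR_map_shadow_stack fallback value (453,
the number assigned in the generic syscall table) and the SHADOW_STACK_SET_TOKEN fallback
are assumptions in case the installed headers do not yet export them, and error handling is
kept to a minimum.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_map_shadow_stack
#define __NR_map_shadow_stack 453		/* assumed: generic syscall table number */
#endif

#ifndef SHADOW_STACK_SET_TOKEN
#define SHADOW_STACK_SET_TOKEN (1ULL << 0)	/* mirrors the uapi define in this patch */
#endif

int main(void)
{
	/* Ask the kernel for one page of shadow stack with a restore token. */
	long ret = syscall(__NR_map_shadow_stack, 0UL, 4096UL,
			   (unsigned int)SHADOW_STACK_SET_TOKEN);

	if (ret == -1) {
		perror("map_shadow_stack");
		return 1;
	}

	/* With SHADOW_STACK_SET_TOKEN, the returned value is the token address. */
	printf("shadow stack token at %#lx\n", (unsigned long)ret);
	return 0;
}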

Comments

Alexandre Ghiti May 12, 2024, 4:50 p.m. UTC | #1
On 04/04/2024 01:35, Deepak Gupta wrote:
> As discussed extensively in the changelog for the addition of this
> syscall on x86 ("x86/shstk: Introduce map_shadow_stack syscall") the
> existing mmap() and madvise() syscalls do not map entirely well onto the
> security requirements for shadow stack memory since they lead to windows
> where memory is allocated but not yet protected or stacks which are not
> properly and safely initialised. Instead a new syscall map_shadow_stack()
> has been defined which allocates and initialises a shadow stack page.
>
> This patch implements this syscall for riscv. riscv doesn't require token
> to be setup by kernel because user mode can do that by itself. However to
> provide compatibility and portability with other architectues, user mode
> can specify token set flag.
>
> Signed-off-by: Deepak Gupta <debug@rivosinc.com>
> ---
>   arch/riscv/kernel/Makefile      |   2 +
>   arch/riscv/kernel/usercfi.c     | 149 ++++++++++++++++++++++++++++++++
>   include/uapi/asm-generic/mman.h |   1 +
>   3 files changed, 152 insertions(+)
>   create mode 100644 arch/riscv/kernel/usercfi.c
>
> diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
> index 604d6bf7e476..3bec82f4e94c 100644
> --- a/arch/riscv/kernel/Makefile
> +++ b/arch/riscv/kernel/Makefile
> @@ -107,3 +107,5 @@ obj-$(CONFIG_COMPAT)		+= compat_vdso/
>   
>   obj-$(CONFIG_64BIT)		+= pi/
>   obj-$(CONFIG_ACPI)		+= acpi.o
> +
> +obj-$(CONFIG_RISCV_USER_CFI) += usercfi.o
> diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c
> new file mode 100644
> index 000000000000..c4ed0d4e33d6
> --- /dev/null
> +++ b/arch/riscv/kernel/usercfi.c
> @@ -0,0 +1,149 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2024 Rivos, Inc.
> + * Deepak Gupta <debug@rivosinc.com>
> + */
> +
> +#include <linux/sched.h>
> +#include <linux/bitops.h>
> +#include <linux/types.h>
> +#include <linux/mm.h>
> +#include <linux/mman.h>
> +#include <linux/uaccess.h>
> +#include <linux/sizes.h>
> +#include <linux/user.h>
> +#include <linux/syscalls.h>
> +#include <linux/prctl.h>
> +#include <asm/csr.h>
> +#include <asm/usercfi.h>
> +
> +#define SHSTK_ENTRY_SIZE sizeof(void *)
> +
> +/*
> + * Writes on shadow stack can either be `sspush` or `ssamoswap`. `sspush` can happen
> + * implicitly on current shadow stack pointed to by CSR_SSP. `ssamoswap` takes pointer to
> + * shadow stack. To keep it simple, we plan to use `ssamoswap` to perform writes on shadow
> + * stack.
> + */
> +static noinline unsigned long amo_user_shstk(unsigned long *addr, unsigned long val)
> +{
> +	/*
> +	 * Since shadow stack is supported only in 64bit configuration,
> +	 * ssamoswap.d is used below.

> +	 * CONFIG_RISCV_USER_CFI is dependent
> +	 * on 64BIT and compile of this file is dependent on CONFIG_RISCV_USER_CFI
> +	 * In case ssamoswap faults, return -1.


To me, this part of the comment is not needed.


> +	 * Never expect -1 on shadow stack. Expect return addresses and zero


In that case, should we BUG() instead?


> +	 */
> +	unsigned long swap = -1;
> +
> +	__enable_user_access();
> +	asm goto(
> +				".option push\n"
> +				".option arch, +zicfiss\n"
> +				"1: ssamoswap.d %[swap], %[val], %[addr]\n"
> +				_ASM_EXTABLE(1b, %l[fault])
> +				RISCV_ACQUIRE_BARRIER
> +				".option pop\n"
> +				: [swap] "=r" (swap), [addr] "+A" (*addr)
> +				: [val] "r" (val)
> +				: "memory"
> +				: fault
> +			);
> +	__disable_user_access();
> +	return swap;
> +fault:
> +	__disable_user_access();
> +	return -1;
> +}
> +
> +/*
> + * Create a restore token on the shadow stack.  A token is always XLEN wide
> + * and aligned to XLEN.
> + */
> +static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
> +{
> +	unsigned long addr;
> +
> +	/* Token must be aligned */
> +	if (!IS_ALIGNED(ssp, SHSTK_ENTRY_SIZE))
> +		return -EINVAL;
> +
> +	/* On RISC-V we're constructing token to be function of address itself */
> +	addr = ssp - SHSTK_ENTRY_SIZE;
> +
> +	if (amo_user_shstk((unsigned long __user *)addr, (unsigned long) ssp) == -1)
> +		return -EFAULT;
> +
> +	if (token_addr)
> +		*token_addr = addr;
> +
> +	return 0;
> +}
> +
> +static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
> +				unsigned long token_offset,
> +				bool set_tok)
> +{
> +	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
> +	struct mm_struct *mm = current->mm;
> +	unsigned long populate, tok_loc = 0;
> +
> +	if (addr)
> +		flags |= MAP_FIXED_NOREPLACE;
> +
> +	mmap_write_lock(mm);
> +	addr = do_mmap(NULL, addr, size, PROT_READ, flags,


Hmmm why do you map the shadow stack as PROT_READ here?


> +				VM_SHADOW_STACK | VM_WRITE, 0, &populate, NULL);
> +	mmap_write_unlock(mm);
> +
> +	if (!set_tok || IS_ERR_VALUE(addr))
> +		goto out;
> +
> +	if (create_rstor_token(addr + token_offset, &tok_loc)) {
> +		vm_munmap(addr, size);
> +		return -EINVAL;
> +	}
> +
> +	addr = tok_loc;
> +
> +out:
> +	return addr;
> +}
> +
> +SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
> +{
> +	bool set_tok = flags & SHADOW_STACK_SET_TOKEN;
> +	unsigned long aligned_size = 0;
> +
> +	if (!cpu_supports_shadow_stack())
> +		return -EOPNOTSUPP;
> +
> +	/* Anything other than set token should result in invalid param */
> +	if (flags & ~SHADOW_STACK_SET_TOKEN)
> +		return -EINVAL;
> +
> +	/*
> +	 * Unlike other architectures, on RISC-V, SSP pointer is held in CSR_SSP and is available
> +	 * CSR in all modes. CSR accesses are performed using 12bit index programmed in instruction
> +	 * itself. This provides static property on register programming and writes to CSR can't
> +	 * be unintentional from programmer's perspective. As long as programmer has guarded areas
> +	 * which perform writes to CSR_SSP properly, shadow stack pivoting is not possible. Since
> +	 * CSR_SSP is writeable by user mode, it itself can setup a shadow stack token subsequent
> +	 * to allocation. Although in order to provide portablity with other architecture (because
> +	 * `map_shadow_stack` is arch agnostic syscall), RISC-V will follow expectation of a token
> +	 * flag in flags and if provided in flags, setup a token at the base.
> +	 */
> +
> +	/* If there isn't space for a token */
> +	if (set_tok && size < SHSTK_ENTRY_SIZE)
> +		return -ENOSPC;
> +
> +	if (addr && (addr % PAGE_SIZE))


I would use:

if (addr && (addr & (PAGE_SIZE - 1)))


> +		return -EINVAL;
> +
> +	aligned_size = PAGE_ALIGN(size);
> +	if (aligned_size < size)
> +		return -EOVERFLOW;
> +
> +	return allocate_shadow_stack(addr, aligned_size, size, set_tok);
> +}
> diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
> index 57e8195d0b53..0c0ac6214de6 100644
> --- a/include/uapi/asm-generic/mman.h
> +++ b/include/uapi/asm-generic/mman.h
> @@ -19,4 +19,5 @@
>   #define MCL_FUTURE	2		/* lock all future mappings */
>   #define MCL_ONFAULT	4		/* lock all pages that are faulted in */
>   
> +#define SHADOW_STACK_SET_TOKEN (1ULL << 0)     /* Set up a restore token in the shadow stack */
>   #endif /* __ASM_GENERIC_MMAN_H */


Don't we need to advertise this new syscall to the man pages?

Deepak Gupta May 13, 2024, 5:25 p.m. UTC | #2
On Sun, May 12, 2024 at 06:50:18PM +0200, Alexandre Ghiti wrote:
>
>On 04/04/2024 01:35, Deepak Gupta wrote:
>>As discussed extensively in the changelog for the addition of this
>>syscall on x86 ("x86/shstk: Introduce map_shadow_stack syscall") the
>>existing mmap() and madvise() syscalls do not map entirely well onto the
>>security requirements for shadow stack memory since they lead to windows
>>where memory is allocated but not yet protected or stacks which are not
>>properly and safely initialised. Instead a new syscall map_shadow_stack()
>>has been defined which allocates and initialises a shadow stack page.
>>
>>This patch implements this syscall for riscv. riscv doesn't require token
>>to be setup by kernel because user mode can do that by itself. However to
>>provide compatibility and portability with other architectues, user mode
>>can specify token set flag.
>>
>>Signed-off-by: Deepak Gupta <debug@rivosinc.com>
>>---
>>  arch/riscv/kernel/Makefile      |   2 +
>>  arch/riscv/kernel/usercfi.c     | 149 ++++++++++++++++++++++++++++++++
>>  include/uapi/asm-generic/mman.h |   1 +
>>  3 files changed, 152 insertions(+)
>>  create mode 100644 arch/riscv/kernel/usercfi.c
>>
>>diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
>>index 604d6bf7e476..3bec82f4e94c 100644
>>--- a/arch/riscv/kernel/Makefile
>>+++ b/arch/riscv/kernel/Makefile
>>@@ -107,3 +107,5 @@ obj-$(CONFIG_COMPAT)		+= compat_vdso/
>>  obj-$(CONFIG_64BIT)		+= pi/
>>  obj-$(CONFIG_ACPI)		+= acpi.o
>>+
>>+obj-$(CONFIG_RISCV_USER_CFI) += usercfi.o
>>diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c
>>new file mode 100644
>>index 000000000000..c4ed0d4e33d6
>>--- /dev/null
>>+++ b/arch/riscv/kernel/usercfi.c
>>@@ -0,0 +1,149 @@
>>+// SPDX-License-Identifier: GPL-2.0
>>+/*
>>+ * Copyright (C) 2024 Rivos, Inc.
>>+ * Deepak Gupta <debug@rivosinc.com>
>>+ */
>>+
>>+#include <linux/sched.h>
>>+#include <linux/bitops.h>
>>+#include <linux/types.h>
>>+#include <linux/mm.h>
>>+#include <linux/mman.h>
>>+#include <linux/uaccess.h>
>>+#include <linux/sizes.h>
>>+#include <linux/user.h>
>>+#include <linux/syscalls.h>
>>+#include <linux/prctl.h>
>>+#include <asm/csr.h>
>>+#include <asm/usercfi.h>
>>+
>>+#define SHSTK_ENTRY_SIZE sizeof(void *)
>>+
>>+/*
>>+ * Writes on shadow stack can either be `sspush` or `ssamoswap`. `sspush` can happen
>>+ * implicitly on current shadow stack pointed to by CSR_SSP. `ssamoswap` takes pointer to
>>+ * shadow stack. To keep it simple, we plan to use `ssamoswap` to perform writes on shadow
>>+ * stack.
>>+ */
>>+static noinline unsigned long amo_user_shstk(unsigned long *addr, unsigned long val)
>>+{
>>+	/*
>>+	 * Since shadow stack is supported only in 64bit configuration,
>>+	 * ssamoswap.d is used below.
>
>>+	 * CONFIG_RISCV_USER_CFI is dependent
>>+	 * on 64BIT and compile of this file is dependent on CONFIG_RISCV_USER_CFI
>>+	 * In case ssamoswap faults, return -1.
>
>
>To me, this part of the comment is not needed.

Ok, will remove it.

>
>
>>+	 * Never expect -1 on shadow stack. Expect return addresses and zero
>
>
>In that case, should we BUG() instead?

The caller (create_rstor_token) of `amo_user_shstk` returns -EFAULT. It'll translate to
signal (SIGSEGV) delivery to the user app, or termination.

>
>
>>+	 */
>>+	unsigned long swap = -1;
>>+
>>+	__enable_user_access();
>>+	asm goto(
>>+				".option push\n"
>>+				".option arch, +zicfiss\n"
>>+				"1: ssamoswap.d %[swap], %[val], %[addr]\n"
>>+				_ASM_EXTABLE(1b, %l[fault])
>>+				RISCV_ACQUIRE_BARRIER
>>+				".option pop\n"
>>+				: [swap] "=r" (swap), [addr] "+A" (*addr)
>>+				: [val] "r" (val)
>>+				: "memory"
>>+				: fault
>>+			);
>>+	__disable_user_access();
>>+	return swap;
>>+fault:
>>+	__disable_user_access();
>>+	return -1;
>>+}
>>+
>>+/*
>>+ * Create a restore token on the shadow stack.  A token is always XLEN wide
>>+ * and aligned to XLEN.
>>+ */
>>+static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
>>+{
>>+	unsigned long addr;
>>+
>>+	/* Token must be aligned */
>>+	if (!IS_ALIGNED(ssp, SHSTK_ENTRY_SIZE))
>>+		return -EINVAL;
>>+
>>+	/* On RISC-V we're constructing token to be function of address itself */
>>+	addr = ssp - SHSTK_ENTRY_SIZE;
>>+
>>+	if (amo_user_shstk((unsigned long __user *)addr, (unsigned long) ssp) == -1)
>>+		return -EFAULT;
>>+
>>+	if (token_addr)
>>+		*token_addr = addr;
>>+
>>+	return 0;
>>+}
>>+
>>+static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
>>+				unsigned long token_offset,
>>+				bool set_tok)
>>+{
>>+	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
>>+	struct mm_struct *mm = current->mm;
>>+	unsigned long populate, tok_loc = 0;
>>+
>>+	if (addr)
>>+		flags |= MAP_FIXED_NOREPLACE;
>>+
>>+	mmap_write_lock(mm);
>>+	addr = do_mmap(NULL, addr, size, PROT_READ, flags,
>
>
>Hmmm why do you map the shadow stack as PROT_READ here?

I believe it's redundant here. I followed what x86 did for their shadow stack creation.
The GCS (arm shadow stack) patches also do the same thing. Collectively, we think that at
some point in the future many of these flows will become generic (arch agnostic).

>
>
>>+				VM_SHADOW_STACK | VM_WRITE, 0, &populate, NULL);
>>+	mmap_write_unlock(mm);
>>+
>>+	if (!set_tok || IS_ERR_VALUE(addr))
>>+		goto out;
>>+
>>+	if (create_rstor_token(addr + token_offset, &tok_loc)) {
>>+		vm_munmap(addr, size);
>>+		return -EINVAL;
>>+	}
>>+
>>+	addr = tok_loc;
>>+
>>+out:
>>+	return addr;
>>+}
>>+
>>+SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
>>+{
>>+	bool set_tok = flags & SHADOW_STACK_SET_TOKEN;
>>+	unsigned long aligned_size = 0;
>>+
>>+	if (!cpu_supports_shadow_stack())
>>+		return -EOPNOTSUPP;
>>+
>>+	/* Anything other than set token should result in invalid param */
>>+	if (flags & ~SHADOW_STACK_SET_TOKEN)
>>+		return -EINVAL;
>>+
>>+	/*
>>+	 * Unlike other architectures, on RISC-V, SSP pointer is held in CSR_SSP and is available
>>+	 * CSR in all modes. CSR accesses are performed using 12bit index programmed in instruction
>>+	 * itself. This provides static property on register programming and writes to CSR can't
>>+	 * be unintentional from programmer's perspective. As long as programmer has guarded areas
>>+	 * which perform writes to CSR_SSP properly, shadow stack pivoting is not possible. Since
>>+	 * CSR_SSP is writeable by user mode, it itself can setup a shadow stack token subsequent
>>+	 * to allocation. Although in order to provide portablity with other architecture (because
>>+	 * `map_shadow_stack` is arch agnostic syscall), RISC-V will follow expectation of a token
>>+	 * flag in flags and if provided in flags, setup a token at the base.
>>+	 */
>>+
>>+	/* If there isn't space for a token */
>>+	if (set_tok && size < SHSTK_ENTRY_SIZE)
>>+		return -ENOSPC;
>>+
>>+	if (addr && (addr % PAGE_SIZE))
>
>
>I would use:
>
>if (addr && (addr & (PAGE_SIZE - 1)))

noted.

>
>
>>+		return -EINVAL;
>>+
>>+	aligned_size = PAGE_ALIGN(size);
>>+	if (aligned_size < size)
>>+		return -EOVERFLOW;
>>+
>>+	return allocate_shadow_stack(addr, aligned_size, size, set_tok);
>>+}
>>diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
>>index 57e8195d0b53..0c0ac6214de6 100644
>>--- a/include/uapi/asm-generic/mman.h
>>+++ b/include/uapi/asm-generic/mman.h
>>@@ -19,4 +19,5 @@
>>  #define MCL_FUTURE	2		/* lock all future mappings */
>>  #define MCL_ONFAULT	4		/* lock all pages that are faulted in */
>>+#define SHADOW_STACK_SET_TOKEN (1ULL << 0)     /* Set up a restore token in the shadow stack */
>>  #endif /* __ASM_GENERIC_MMAN_H */
>
>
>Don't we need to advertise this new syscall to the man pages?

`map_shadow_stack` is already mainline as part of x86. I am assuming there is a man page for it.
I'll check to be sure and confirm here.

>

Patch

diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 604d6bf7e476..3bec82f4e94c 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -107,3 +107,5 @@  obj-$(CONFIG_COMPAT)		+= compat_vdso/
 
 obj-$(CONFIG_64BIT)		+= pi/
 obj-$(CONFIG_ACPI)		+= acpi.o
+
+obj-$(CONFIG_RISCV_USER_CFI) += usercfi.o
diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c
new file mode 100644
index 000000000000..c4ed0d4e33d6
--- /dev/null
+++ b/arch/riscv/kernel/usercfi.c
@@ -0,0 +1,149 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Rivos, Inc.
+ * Deepak Gupta <debug@rivosinc.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/sizes.h>
+#include <linux/user.h>
+#include <linux/syscalls.h>
+#include <linux/prctl.h>
+#include <asm/csr.h>
+#include <asm/usercfi.h>
+
+#define SHSTK_ENTRY_SIZE sizeof(void *)
+
+/*
+ * Writes on shadow stack can either be `sspush` or `ssamoswap`. `sspush` can happen
+ * implicitly on current shadow stack pointed to by CSR_SSP. `ssamoswap` takes pointer to
+ * shadow stack. To keep it simple, we plan to use `ssamoswap` to perform writes on shadow
+ * stack.
+ */
+static noinline unsigned long amo_user_shstk(unsigned long *addr, unsigned long val)
+{
+	/*
+	 * Since shadow stack is supported only in the 64-bit configuration,
+	 * ssamoswap.d is used below. CONFIG_RISCV_USER_CFI depends on 64BIT
+	 * and compilation of this file depends on CONFIG_RISCV_USER_CFI.
+	 * In case ssamoswap faults, return -1.
+	 * We never expect -1 on a shadow stack; expect return addresses and zero.
+	 */
+	unsigned long swap = -1;
+
+	__enable_user_access();
+	asm goto(
+				".option push\n"
+				".option arch, +zicfiss\n"
+				"1: ssamoswap.d %[swap], %[val], %[addr]\n"
+				_ASM_EXTABLE(1b, %l[fault])
+				RISCV_ACQUIRE_BARRIER
+				".option pop\n"
+				: [swap] "=r" (swap), [addr] "+A" (*addr)
+				: [val] "r" (val)
+				: "memory"
+				: fault
+			);
+	__disable_user_access();
+	return swap;
+fault:
+	__disable_user_access();
+	return -1;
+}
+
+/*
+ * Create a restore token on the shadow stack.  A token is always XLEN wide
+ * and aligned to XLEN.
+ */
+static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
+{
+	unsigned long addr;
+
+	/* Token must be aligned */
+	if (!IS_ALIGNED(ssp, SHSTK_ENTRY_SIZE))
+		return -EINVAL;
+
+	/* On RISC-V we're constructing the token to be a function of the address itself */
+	addr = ssp - SHSTK_ENTRY_SIZE;
+
+	if (amo_user_shstk((unsigned long __user *)addr, (unsigned long) ssp) == -1)
+		return -EFAULT;
+
+	if (token_addr)
+		*token_addr = addr;
+
+	return 0;
+}
+
+static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
+				unsigned long token_offset,
+				bool set_tok)
+{
+	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
+	struct mm_struct *mm = current->mm;
+	unsigned long populate, tok_loc = 0;
+
+	if (addr)
+		flags |= MAP_FIXED_NOREPLACE;
+
+	mmap_write_lock(mm);
+	addr = do_mmap(NULL, addr, size, PROT_READ, flags,
+				VM_SHADOW_STACK | VM_WRITE, 0, &populate, NULL);
+	mmap_write_unlock(mm);
+
+	if (!set_tok || IS_ERR_VALUE(addr))
+		goto out;
+
+	if (create_rstor_token(addr + token_offset, &tok_loc)) {
+		vm_munmap(addr, size);
+		return -EINVAL;
+	}
+
+	addr = tok_loc;
+
+out:
+	return addr;
+}
+
+SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
+{
+	bool set_tok = flags & SHADOW_STACK_SET_TOKEN;
+	unsigned long aligned_size = 0;
+
+	if (!cpu_supports_shadow_stack())
+		return -EOPNOTSUPP;
+
+	/* Anything other than set token should result in invalid param */
+	if (flags & ~SHADOW_STACK_SET_TOKEN)
+		return -EINVAL;
+
+	/*
+	 * Unlike other architectures, on RISC-V the SSP pointer is held in CSR_SSP, a CSR that is
+	 * available in all modes. CSR accesses are performed using a 12-bit index encoded in the
+	 * instruction itself. This makes register selection static, so writes to the CSR can't be
+	 * unintentional from the programmer's perspective. As long as the programmer has properly
+	 * guarded the areas which write to CSR_SSP, shadow stack pivoting is not possible. Since
+	 * CSR_SSP is writeable by user mode, user mode can itself set up a shadow stack token
+	 * after allocation. However, in order to provide portability with other architectures
+	 * (because `map_shadow_stack` is an arch-agnostic syscall), RISC-V follows the convention
+	 * of a token flag in flags and, if it is provided, sets up a token at the base.
+	 */
+
+	/* If there isn't space for a token */
+	if (set_tok && size < SHSTK_ENTRY_SIZE)
+		return -ENOSPC;
+
+	if (addr && (addr % PAGE_SIZE))
+		return -EINVAL;
+
+	aligned_size = PAGE_ALIGN(size);
+	if (aligned_size < size)
+		return -EOVERFLOW;
+
+	return allocate_shadow_stack(addr, aligned_size, size, set_tok);
+}
diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
index 57e8195d0b53..0c0ac6214de6 100644
--- a/include/uapi/asm-generic/mman.h
+++ b/include/uapi/asm-generic/mman.h
@@ -19,4 +19,5 @@ 
 #define MCL_FUTURE	2		/* lock all future mappings */
 #define MCL_ONFAULT	4		/* lock all pages that are faulted in */
 
+#define SHADOW_STACK_SET_TOKEN (1ULL << 0)     /* Set up a restore token in the shadow stack */
 #endif /* __ASM_GENERIC_MMAN_H */
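
As a closing illustration (not part of the patch), the arithmetic below mirrors what
create_rstor_token() does, seen from the caller's side, assuming a hypothetical mapping
base and a 4 KiB request with SHADOW_STACK_SET_TOKEN: the token lives one SHSTK_ENTRY_SIZE
below the end of the requested size, holds the address of that end, and the syscall returns
the token's address; without the flag, the mapping base is returned instead.

#include <stdint.h>
#include <stdio.h>

#define SHSTK_ENTRY_SIZE sizeof(void *)

int main(void)
{
	uint64_t base = 0x3fffff0000ULL;	/* hypothetical mapping base */
	uint64_t size = 4096;			/* requested size */

	uint64_t ssp = base + size;			/* initial top of the shadow stack */
	uint64_t token_addr = ssp - SHSTK_ENTRY_SIZE;	/* where create_rstor_token() puts the token */
	uint64_t token_val = ssp;			/* the token is the address itself */

	printf("token at %#llx holds %#llx; map_shadow_stack() returns %#llx\n",
	       (unsigned long long)token_addr,
	       (unsigned long long)token_val,
	       (unsigned long long)token_addr);
	return 0;
}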