@@ -50,6 +50,10 @@ static inline u64 gcsss2(void)
 	return Xt;
 }
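+/* The GCS status bits that userspace is allowed to configure */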
+#define PR_SHADOW_STACK_SUPPORTED_STATUS_MASK \
+	(PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_WRITE | PR_SHADOW_STACK_PUSH)
+
 #ifdef CONFIG_ARM64_GCS
 static inline bool task_gcs_el0_enabled(struct task_struct *task)
@@ -63,6 +67,25 @@ void gcs_preserve_current_state(void);
 unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
 				     const struct kernel_clone_args *args);
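+/*
+ * Check that a proposed GCS mode change does not modify any bits that
+ * were locked with PR_LOCK_SHADOW_STACK_STATUS: mask both the current
+ * and the requested mode with the lock bitmap and compare.
+ */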
+static inline int gcs_check_locked(struct task_struct *task,
+				   unsigned long new_val)
+{
+	unsigned long cur_val = task->thread.gcs_el0_mode;
+
+	cur_val &= task->thread.gcs_el0_locked;
+	new_val &= task->thread.gcs_el0_locked;
+
+	if (cur_val != new_val)
+		return -EBUSY;
+
+	return 0;
+}
+
 #else
 static inline bool task_gcs_el0_enabled(struct task_struct *task)
@@ -78,6 +101,12 @@ static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
 {
 	return -ENOTSUPP;
 }
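+/* Without CONFIG_ARM64_GCS no bits can be locked, so no change can conflict */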
+static inline int gcs_check_locked(struct task_struct *task,
+				   unsigned long new_val)
+{
+	return 0;
+}
#endif
@@ -187,6 +187,7 @@ struct thread_struct {
 	u64			por_el0;
 #ifdef CONFIG_ARM64_GCS
 	unsigned int		gcs_el0_mode;
+	unsigned int		gcs_el0_locked;
 	u64			gcspr_el0;
 	u64			gcs_base;
 	u64			gcs_size;
@@ -109,3 +109,92 @@ void gcs_free(struct task_struct *task)
 	task->thread.gcs_base = 0;
 	task->thread.gcs_size = 0;
 }
+
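+/*
+ * Backend for prctl(PR_SET_SHADOW_STACK_STATUS): validate the
+ * requested mode, allocate an initial stack if GCS is being enabled
+ * for the first time and then apply the new mode to the task.
+ */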
+int arch_set_shadow_stack_status(struct task_struct *task, unsigned long arg)
+{
+	unsigned long gcs, size;
+	int ret;
+
+	if (!system_supports_gcs())
+		return -EINVAL;
+
+	if (is_compat_thread(task_thread_info(task)))
+		return -EINVAL;
+
+	/* Reject unknown flags */
+	if (arg & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
+		return -EINVAL;
+
+	ret = gcs_check_locked(task, arg);
+	if (ret != 0)
+		return ret;
+
+	/* If we are enabling GCS then make sure we have a stack */
+	if (arg & PR_SHADOW_STACK_ENABLE &&
+	    !task_gcs_el0_enabled(task)) {
+		/* Do not allow GCS to be re-enabled once disabled */
+		if (task->thread.gcs_base || task->thread.gcspr_el0)
+			return -EINVAL;
+
+		if (task != current)
+			return -EBUSY;
+
+		size = gcs_size(0);
+		gcs = alloc_gcs(0, size);
+		if (!gcs)
+			return -ENOMEM;
+
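+		/* GCS grows downwards; start at the top entry of the new stack */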
+		task->thread.gcspr_el0 = gcs + size - sizeof(u64);
+		task->thread.gcs_base = gcs;
+		task->thread.gcs_size = size;
+		/* task == current is guaranteed by the -EBUSY check above */
+		write_sysreg_s(task->thread.gcspr_el0, SYS_GCSPR_EL0);
+	}
+
+	task->thread.gcs_el0_mode = arg;
+	if (task == current)
+		gcs_set_el0_mode(task);
+
+	return 0;
+}
+
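+/* Backend for prctl(PR_GET_SHADOW_STACK_STATUS): report the current mode */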
+int arch_get_shadow_stack_status(struct task_struct *task,
+				 unsigned long __user *arg)
+{
+	if (!system_supports_gcs())
+		return -EINVAL;
+
+	if (is_compat_thread(task_thread_info(task)))
+		return -EINVAL;
+
+	return put_user(task->thread.gcs_el0_mode, arg);
+}
+
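+/*
+ * Backend for prctl(PR_LOCK_SHADOW_STACK_STATUS): make the given
+ * status bits immutable for subsequent PR_SET_SHADOW_STACK_STATUS.
+ */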
+int arch_lock_shadow_stack_status(struct task_struct *task,
+				  unsigned long arg)
+{
+	if (!system_supports_gcs())
+		return -EINVAL;
+
+	if (is_compat_thread(task_thread_info(task)))
+		return -EINVAL;
+
+	/*
+	 * We support locking unknown bits so applications can prevent
+	 * any changes in a future-proof manner.
+	 */
+	task->thread.gcs_el0_locked |= arg;
+
+	return 0;
+}
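
For illustration, a minimal userspace sketch of the resulting interface
(not part of this patch; the PR_* constant values are assumptions based
on the uapi additions elsewhere in this series). The enabling call goes
through an always-inlined raw syscall: once GCS is on, returning from a
function frame entered before enablement has no GCS record to pop and
would fault, which is why real programs are expected to have GCS enabled
very early by the C library or dynamic linker.

#include <stdio.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <sys/syscall.h>

#ifndef PR_SET_SHADOW_STACK_STATUS
#define PR_GET_SHADOW_STACK_STATUS	74
#define PR_SET_SHADOW_STACK_STATUS	75
#define PR_LOCK_SHADOW_STACK_STATUS	76
#define PR_SHADOW_STACK_ENABLE		(1UL << 0)
#endif

/* Raw prctl(); always_inline so no pre-GCS frame has to return */
static inline __attribute__((always_inline))
long prctl_raw(unsigned long op, unsigned long a1)
{
	register long x8 asm("x8") = __NR_prctl;
	register long x0 asm("x0") = op;
	register long x1 asm("x1") = a1;
	register long x2 asm("x2") = 0;
	register long x3 asm("x3") = 0;
	register long x4 asm("x4") = 0;

	asm volatile("svc #0"
		     : "+r" (x0)
		     : "r" (x8), "r" (x1), "r" (x2), "r" (x3), "r" (x4)
		     : "memory");

	return x0;
}

int main(void)
{
	unsigned long mode = 0;

	/* Enable GCS; arch_set_shadow_stack_status() allocates the stack */
	if (prctl_raw(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE))
		exit(1);

	/* Calls made after enabling are balanced, so libc wrappers work */
	prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
	printf("GCS mode: 0x%lx\n", mode);

	/* Lock the enable bit: clearing it now fails with -EBUSY */
	prctl(PR_LOCK_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE, 0, 0, 0);

	/* Never return from main(): its frame predates GCS enablement */
	exit(0);
}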