@@ -117,6 +117,8 @@ infrastructure:
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | RNDR | [63-60] | y |
+ +------------------------------+---------+---------+
| TS | [55-52] | y |
+------------------------------+---------+---------+
| FHM | [51-48] | y |
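Because ID_AA64ISAR0_EL1 is covered by the CPU feature register ABI (EL0 reads are trapped and emulated, and only fields marked visible are reported), exposing RNDR lets userspace probe for the extension directly. A minimal, purely illustrative check, assuming the field layout in the table above (not part of this patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t isar0;

	/* Trapped and emulated by the kernel for EL0; fields not marked
	 * visible in the table above read as zero. */
	asm("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));

	/* RNDR occupies bits [63:60]; non-zero means RNDR/RNDRRS exist. */
	printf("RNDR field = %u\n", (unsigned int)(isar0 >> 60) & 0xf);
	return 0;
}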
new file mode 100644
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <asm/alternative.h>
+
+void arm64_update_get_random_seed_long(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst);
+
+static inline bool __must_check arch_get_random_long(unsigned long *v)
+{
+ return false;
+}
+
+static inline bool __must_check arch_get_random_int(unsigned int *v)
+{
+ return false;
+}
+
+/*
+ * The ALTERNATIVE infrastructure makes GCC estimate this inline
+ * assembly as far larger than its two instructions, which would
+ * otherwise make the function look unprofitable to inline.
+ * Override that decision with __always_inline.
+ */
+static __always_inline __must_check
+bool arch_get_random_seed_long(unsigned long *v)
+{
+ register unsigned long x0 __asm__("x0");
+ unsigned long ok;
+
+ asm volatile(ALTERNATIVE_CB("bl boot_get_random_seed_long\n",
+ arm64_update_get_random_seed_long)
+ "cset %1, ne\n"
+ : "=r" (x0), "=r" (ok) : : "cc");
+
+ *v = x0;
+ return ok;
+}
+
+static __always_inline __must_check
+bool arch_get_random_seed_int(unsigned int *v)
+{
+ unsigned long val;
+ bool ok = arch_get_random_seed_long(&val);
+
+ *v = val;
+ return ok;
+}
+
+#endif /* CONFIG_ARCH_RANDOM */
+#endif /* _ASM_ARCHRANDOM_H */
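Only the seed variants are wired up here: arch_get_random_long() and arch_get_random_int() always return false, and drivers/char/random.c falls back accordingly. As a rough sketch of a consumer (a hypothetical helper, not code from this patch), preferring RNDR and falling back to a timestamp:

#include <linux/random.h>	/* pulls in asm/archrandom.h when CONFIG_ARCH_RANDOM=y */
#include <linux/timex.h>	/* random_get_entropy() */

/* Hypothetical: XOR n words of seed material into a pool, preferring
 * the CPU RNG and falling back to a timestamp when it is unavailable. */
static void example_mix_seed(unsigned long *pool, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned long rv;

		if (!arch_get_random_seed_long(&rv))
			rv = random_get_entropy();
		pool[i] ^= rv;
	}
}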
@@ -54,7 +54,8 @@
#define ARM64_WORKAROUND_1463225 44
#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
+#define ARM64_HAS_RNG 47
-#define ARM64_NCAPS 47
+#define ARM64_NCAPS 48
#endif /* __ASM_CPUCAPS_H */
@@ -365,6 +365,9 @@
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
+#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
+#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
+
#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
@@ -539,6 +542,7 @@
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
/* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT 60
#define ID_AA64ISAR0_TS_SHIFT 52
#define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_DP_SHIFT 44
@@ -119,6 +119,7 @@ static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
* sync with the documentation of the CPU feature register ABI.
*/
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
@@ -1565,6 +1566,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
.min_field_value = 1,
},
+#endif
+#ifdef CONFIG_ARCH_RANDOM
+ {
+ .desc = "Random Number Generator",
+ .capability = ARM64_HAS_RNG,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
#endif
{},
};
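With the capability registered, other kernel code can gate on it once feature detection has run. An illustrative user (not part of this patch):

#include <asm/cpufeature.h>

/* Hypothetical helper gating on the new capability bit. */
static bool have_armv85_rng(void)
{
	/*
	 * cpus_have_const_cap() turns into a patched static branch once
	 * alternatives have been applied, so this check is essentially
	 * free on the fast path.
	 */
	return cpus_have_const_cap(ARM64_HAS_RNG);
}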
new file mode 100644
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Random number generation using ARMv8.5-RNG.
+ */
+
+#include <linux/random.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
+#include <linux/preempt.h>
+#include <asm/cpufeature.h>
+
+/*
+ * Before alternatives are finalized, arch_get_random_seed_long calls
+ * this function.  The ABI is as if
+ *
+ *	mrs	x0, rndr
+ *
+ * with all other call-clobbered registers preserved.
+ */
+
+asm(".globl boot_get_random_seed_long\n"
+".type boot_get_random_seed_long, @function\n"
+"boot_get_random_seed_long:\n"
+" stp x29, x30, [sp, -160]!\n"
+" stp x1, x2, [sp, 16]\n"
+" stp x3, x4, [sp, 32]\n"
+" stp x5, x6, [sp, 48]\n"
+" stp x7, x8, [sp, 64]\n"
+" stp x9, x10, [sp, 80]\n"
+" stp x11, x12, [sp, 96]\n"
+" stp x13, x14, [sp, 112]\n"
+" stp x15, x16, [sp, 128]\n"
+" stp x17, x18, [sp, 144]\n"
+" mov x0, " __stringify(ARM64_HAS_RNG) "\n"
+" bl this_cpu_has_cap\n"
+" ldp x1, x2, [sp, 16]\n"
+" ldp x3, x4, [sp, 32]\n"
+" ldp x5, x6, [sp, 48]\n"
+" ldp x7, x8, [sp, 64]\n"
+" ldp x9, x10, [sp, 80]\n"
+" ldp x11, x12, [sp, 96]\n"
+" ldp x13, x14, [sp, 112]\n"
+" ldp x15, x16, [sp, 128]\n"
+" ldp x17, x18, [sp, 144]\n"
+" ldp x29, x30, [sp], 160\n"
+/* Test this_cpu_has_cap result, clearing x0 and setting Z if false. */
+" ands w0, w0, #0xff\n"
+" beq 1f\n"
+ __mrs_s("x0", SYS_RNDR_EL0) "\n"
+"1: ret\n"
+".size boot_get_random_seed_long, . - boot_get_random_seed_long\n");
+
+
+void arm64_update_get_random_seed_long(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+{
+ u32 insn;
+
+ BUG_ON(nr_inst != 1);
+
+ if (cpus_have_cap(ARM64_HAS_RNG))
+ insn = 0xd53b2400; /* mrs x0, rndr */
+ else
+ insn = 0xea1f03e0; /* ands x0, xzr, xzr: x0 = 0, Z set */
+ updptr[0] = cpu_to_le32(insn);
+}
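For comparison, once alternatives are applied the call site in archrandom.h is just mrs x0, rndr followed by the cset. An open-coded reader using the __mrs_s() helper from sysreg.h, shown only as a sketch of what the patched sequence does:

#include <asm/sysreg.h>

/*
 * Sketch only: RNDR sets PSTATE.NZCV to 0b0000 on success and to
 * 0b0100 when no random data is available, so Z distinguishes the
 * two cases.
 */
static inline bool example_read_rndr(unsigned long *v)
{
	bool ok;

	asm volatile(__mrs_s("%0", SYS_RNDR_EL0) "\n"
		     "	cset %w1, ne\n"
		     : "=r" (*v), "=r" (ok) : : "cc");

	return ok;
}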
@@ -1438,6 +1438,18 @@ config ARM64_PTR_AUTH
endmenu
+menu "ARMv8.5 architectural features"
+
+config ARCH_RANDOM
+ bool "Enable support for random number generation"
+ default y
+ help
+ Random number generation (part of the ARMv8.5 Extensions)
+ provides a high bandwidth, cryptographically secure
+ hardware random number generator.
+
+endmenu
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
@@ -63,6 +63,7 @@ obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
obj-$(CONFIG_ARM64_SSBD) += ssbd.o
obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
+obj-$(CONFIG_ARCH_RANDOM) += random.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
@@ -539,7 +539,7 @@ endmenu
config RANDOM_TRUST_CPU
bool "Trust the CPU manufacturer to initialize Linux's CRNG"
- depends on X86 || S390 || PPC
+ depends on X86 || S390 || PPC || ARM64
default n
help
Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
@@ -559,4 +559,4 @@ config RANDOM_TRUST_BOOTLOADER
device randomness. Say Y here to assume the entropy provided by the
bootloader is trustworthy so it will be added to the kernel's entropy
pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool.
\ No newline at end of file
+ only mixes the entropy pool.
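Taken together, letting the hardware RNG contribute to early CRNG seeding becomes a configuration choice; an illustrative fragment:

CONFIG_ARCH_RANDOM=y
CONFIG_RANDOM_TRUST_CPU=y

RANDOM_TRUST_CPU (or the existing random.trust_cpu= boot parameter) should only affect whether the architectural seed is credited as entropy during early CRNG initialisation; the seed material itself is mixed in either way.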