diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -9,6 +9,7 @@
#define __ASM_ENCRYPTED_STATE_H
#include <linux/types.h>
+#include <linux/sev.h>
#include <asm/insn.h>
#include <asm/sev-common.h>
@@ -74,9 +75,6 @@ struct cc_blob_sev_info {
/* Software defined (when rFlags.CF = 1) */
#define PVALIDATE_FAIL_NOUPDATE 255
-/* RMP page size */
-#define RMP_PG_SIZE_4K 0
-
/* Memory operation for snp_prep_memory() */
enum snp_mem_op {
MEMORY_PRIVATE,
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -46,6 +46,10 @@
#define DR7_RESET_VALUE 0x400
+#define RMPTABLE_ENTRIES_OFFSET 0x4000
+#define RMPENTRY_SHIFT 8
+#define rmptable_page_offset(x) (RMPTABLE_ENTRIES_OFFSET + ((unsigned long)(x) >> RMPENTRY_SHIFT))
+
/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
@@ -2198,3 +2202,27 @@ static int __init snp_rmptable_init(void)
* passthrough state, and it is available after subsys_initcall().
*/
fs_initcall(snp_rmptable_init);
+
+struct rmpentry *snp_lookup_page_in_rmptable(struct page *page, int *level)
+{
+ unsigned long phys = page_to_pfn(page) << PAGE_SHIFT;
+ struct rmpentry *entry, *large_entry;
+ unsigned long vaddr;
+
+ if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ return NULL;
+
+ vaddr = rmptable_start + rmptable_page_offset(phys);
+ if (unlikely(vaddr > rmptable_end))
+ return NULL;
+
+ entry = (struct rmpentry *)vaddr;
+
+ /* Read the large-page (2MB-aligned) RMP entry to get the correct page level for this entry. */
+ vaddr = rmptable_start + rmptable_page_offset(phys & PMD_MASK);
+ large_entry = (struct rmpentry *)vaddr;
+ *level = RMP_TO_X86_PG_LEVEL(rmpentry_pagesize(large_entry));
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(snp_lookup_page_in_rmptable);
diff --git a/include/linux/sev.h b/include/linux/sev.h
new file mode 100644
--- /dev/null
+++ b/include/linux/sev.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Secure Encrypted Virtualization
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __LINUX_SEV_H
+#define __LINUX_SEV_H
+
+struct __packed rmpentry {
+ union {
+ struct {
+ u64 assigned : 1,
+ pagesize : 1,
+ immutable : 1,
+ rsvd1 : 9,
+ gpa : 39,
+ asid : 10,
+ vmsa : 1,
+ validated : 1,
+ rsvd2 : 1;
+ } info;
+ u64 low;
+ };
+ u64 high;
+};
+
+#define rmpentry_assigned(x) ((x)->info.assigned)
+#define rmpentry_pagesize(x) ((x)->info.pagesize)
+#define rmpentry_vmsa(x) ((x)->info.vmsa)
+#define rmpentry_asid(x) ((x)->info.asid)
+#define rmpentry_validated(x) ((x)->info.validated)
+#define rmpentry_gpa(x) ((unsigned long)(x)->info.gpa)
+#define rmpentry_immutable(x) ((x)->info.immutable)
+
+/* RMP page size */
+#define RMP_PG_SIZE_4K 0
+
+#define RMP_TO_X86_PG_LEVEL(level) (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+struct rmpentry *snp_lookup_page_in_rmptable(struct page *page, int *level);
+#else
+static inline struct rmpentry *snp_lookup_page_in_rmptable(struct page *page, int *level)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+#endif /* __LINUX_SEV_H */
snp_lookup_page_in_rmptable() can be used by the host to read the RMP entry for a given page. The RMP entry format is documented in the AMD PPR; see https://bugzilla.kernel.org/attachment.cgi?id=296015.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/sev.h |  4 +--
 arch/x86/kernel/sev.c      | 28 +++++++++++++++++++++
 include/linux/sev.h        | 51 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 80 insertions(+), 3 deletions(-)
 create mode 100644 include/linux/sev.h
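
For reference, a minimal sketch of how a host-side caller might consume the new helper together with the rmpentry_*() accessors from include/linux/sev.h. The wrapper name host_page_is_guest_owned() and the pr_debug() output are illustrative assumptions, not part of this series; only snp_lookup_page_in_rmptable() and the accessor macros come from the patch:

#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sev.h>

/*
 * Illustrative only: report whether @page is currently assigned to an
 * SNP guest in the RMP. Returns false when SNP is not enabled or the
 * page falls outside the RMP table (the lookup helper returns NULL in
 * both cases).
 */
static bool host_page_is_guest_owned(struct page *page)
{
	struct rmpentry *e;
	int level;

	e = snp_lookup_page_in_rmptable(page, &level);
	if (!e)
		return false;

	pr_debug("RMP entry: assigned=%d pagesize=%d level=%d\n",
		 (int)rmpentry_assigned(e), (int)rmpentry_pagesize(e), level);

	return rmpentry_assigned(e);
}

The level out-parameter, derived via RMP_TO_X86_PG_LEVEL(), distinguishes 4K from 2M RMP coverage; a caller could use it to decide whether an entire 2MB range, rather than a single 4K page, needs to be handled.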