| Message ID | 41f3cc3be60571ebe4d5c6d51f1ed27f32afd58c.1623174621.git.ashish.kalra@amd.com |
|---|---|
| State | New |
| Series | Add Guest API & Guest Kernel support for SEV live migration. |
On Tue, Jun 08, 2021 at 06:06:26PM +0000, Ashish Kalra wrote:
> +void notify_range_enc_status_changed(unsigned long vaddr, int npages,
> +				     bool enc)

You don't need to break this line.

> @@ -285,12 +333,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
>  static int __init early_set_memory_enc_dec(unsigned long vaddr,
>  					   unsigned long size, bool enc)
>  {
> -	unsigned long vaddr_end, vaddr_next;
> +	unsigned long vaddr_end, vaddr_next, start;
>  	unsigned long psize, pmask;
>  	int split_page_size_mask;
>  	int level, ret;
>  	pte_t *kpte;
>
> +	start = vaddr;
>  	vaddr_next = vaddr;
>  	vaddr_end = vaddr + size;
>
> @@ -345,6 +394,8 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
>
>  	ret = 0;
>
> +	notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT,
> +					enc);

Ditto.

>  out:
>  	__flush_tlb_all();
>  	return ret;
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index 156cd235659f..9729cb0d99e3 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -2020,6 +2020,13 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>  	 */
>  	cpa_flush(&cpa, 0);
>
> +	/*
> +	 * Notify hypervisor that a given memory range is mapped encrypted
> +	 * or decrypted. The hypervisor will use this information during the
> +	 * VM migration.
> +	 */

Simplify that comment:

	/*
	 * Notify the hypervisor about the encryption status change of the memory
	 * range. It will use this information during the VM migration.
	 */

With those nitpicks fixed:

Reviewed-by: Borislav Petkov <bp@suse.de>

Paolo, if you want me to take this, lemme know, but I think it'll
conflict with patch 5 so perhaps it all should go together through the
kvm tree...

Thx.

--
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
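[For reference, the unbroken form Boris is asking for is the one the patch itself already uses for the prototype in set_memory.h, where the declaration fits on a single line:

void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
]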
Hello Boris, Paolo,

On Thu, Jun 10, 2021 at 08:30:52PM +0200, Borislav Petkov wrote:
> On Tue, Jun 08, 2021 at 06:06:26PM +0000, Ashish Kalra wrote:
> > +void notify_range_enc_status_changed(unsigned long vaddr, int npages,
> > +				      bool enc)
>
> You don't need to break this line.
>
> > @@ -285,12 +333,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
> >  static int __init early_set_memory_enc_dec(unsigned long vaddr,
> >  					    unsigned long size, bool enc)
> >  {
> > -	unsigned long vaddr_end, vaddr_next;
> > +	unsigned long vaddr_end, vaddr_next, start;
> >  	unsigned long psize, pmask;
> >  	int split_page_size_mask;
> >  	int level, ret;
> >  	pte_t *kpte;
> >
> > +	start = vaddr;
> >  	vaddr_next = vaddr;
> >  	vaddr_end = vaddr + size;
> >
> > @@ -345,6 +394,8 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
> >
> >  	ret = 0;
> >
> > +	notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT,
> > +					enc);
>
> Ditto.
>
> >  out:
> >  	__flush_tlb_all();
> >  	return ret;
> > diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> > index 156cd235659f..9729cb0d99e3 100644
> > --- a/arch/x86/mm/pat/set_memory.c
> > +++ b/arch/x86/mm/pat/set_memory.c
> > @@ -2020,6 +2020,13 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
> >  	 */
> >  	cpa_flush(&cpa, 0);
> >
> > +	/*
> > +	 * Notify hypervisor that a given memory range is mapped encrypted
> > +	 * or decrypted. The hypervisor will use this information during the
> > +	 * VM migration.
> > +	 */
>
> Simplify that comment:
>
> 	/*
> 	 * Notify the hypervisor about the encryption status change of the memory
> 	 * range. It will use this information during the VM migration.
> 	 */
>
> With those nitpicks fixed:
>
> Reviewed-by: Borislav Petkov <bp@suse.de>
>
> Paolo, if you want me to take this, lemme know, but I think it'll
> conflict with patch 5 so perhaps it all should go together through the
> kvm tree...

Will these patches be merged into 5.14?

I have posted another version (v5) for patch 5 after more review
comments from Boris, so please pull in all these patches together.

Thanks,
Ashish

> Thx.
>
> --
> Regards/Gruss,
>     Boris.
>
> https://people.kernel.org/tglx/notes-about-netiquette
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index da3a1ac82be5..540bf8cb37db 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -97,6 +97,12 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 	PVOP_VCALL1(mmu.exit_mmap, mm);
 }
 
+static inline void notify_page_enc_status_changed(unsigned long pfn,
+						  int npages, bool enc)
+{
+	PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
+}
+
 #ifdef CONFIG_PARAVIRT_XXL
 static inline void load_sp0(unsigned long sp0)
 {
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index d9d6b0203ec4..664199820239 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -168,6 +168,7 @@ struct pv_mmu_ops {
 
 	/* Hook for intercepting the destruction of an mm_struct. */
 	void (*exit_mmap)(struct mm_struct *mm);
+	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
 
 #ifdef CONFIG_PARAVIRT_XXL
 	struct paravirt_callee_save read_cr2;
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 43fa081a1adb..872617542bbc 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -83,6 +83,7 @@ int set_pages_rw(struct page *page, int numpages);
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
+void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
 
 extern int kernel_set_to_readonly;
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 04cafc057bed..1cc20ac9a54f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -296,6 +296,7 @@ struct paravirt_patch_template pv_ops = {
 			(void (*)(struct mmu_gather *, void *))tlb_remove_page,
 
 	.mmu.exit_mmap = paravirt_nop,
+	.mmu.notify_page_enc_status_changed = paravirt_nop,
 
 #ifdef CONFIG_PARAVIRT_XXL
 	.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2),
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index ff08dc463634..6b12620376a4 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -228,29 +228,77 @@ void __init sev_setup_arch(void)
 	swiotlb_adjust_size(size);
 }
 
-static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
 {
-	pgprot_t old_prot, new_prot;
-	unsigned long pfn, pa, size;
-	pte_t new_pte;
+	unsigned long pfn = 0;
+	pgprot_t prot;
 
 	switch (level) {
 	case PG_LEVEL_4K:
 		pfn = pte_pfn(*kpte);
-		old_prot = pte_pgprot(*kpte);
+		prot = pte_pgprot(*kpte);
 		break;
 	case PG_LEVEL_2M:
 		pfn = pmd_pfn(*(pmd_t *)kpte);
-		old_prot = pmd_pgprot(*(pmd_t *)kpte);
+		prot = pmd_pgprot(*(pmd_t *)kpte);
 		break;
 	case PG_LEVEL_1G:
 		pfn = pud_pfn(*(pud_t *)kpte);
-		old_prot = pud_pgprot(*(pud_t *)kpte);
+		prot = pud_pgprot(*(pud_t *)kpte);
 		break;
 	default:
-		return;
+		WARN_ONCE(1, "Invalid level for kpte\n");
+		return 0;
 	}
 
+	if (ret_prot)
+		*ret_prot = prot;
+
+	return pfn;
+}
+
+void notify_range_enc_status_changed(unsigned long vaddr, int npages,
+				     bool enc)
+{
+#ifdef CONFIG_PARAVIRT
+	unsigned long sz = npages << PAGE_SHIFT;
+	unsigned long vaddr_end = vaddr + sz;
+
+	while (vaddr < vaddr_end) {
+		int psize, pmask, level;
+		unsigned long pfn;
+		pte_t *kpte;
+
+		kpte = lookup_address(vaddr, &level);
+		if (!kpte || pte_none(*kpte)) {
+			WARN_ONCE(1, "kpte lookup for vaddr\n");
+			return;
+		}
+
+		pfn = pg_level_to_pfn(level, kpte, NULL);
+		if (!pfn)
+			continue;
+
+		psize = page_level_size(level);
+		pmask = page_level_mask(level);
+
+		notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);
+
+		vaddr = (vaddr & pmask) + psize;
+	}
+#endif
+}
+
+static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+{
+	pgprot_t old_prot, new_prot;
+	unsigned long pfn, pa, size;
+	pte_t new_pte;
+
+	pfn = pg_level_to_pfn(level, kpte, &old_prot);
+	if (!pfn)
+		return;
+
 	new_prot = old_prot;
 	if (enc)
 		pgprot_val(new_prot) |= _PAGE_ENC;
@@ -285,12 +333,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 static int __init early_set_memory_enc_dec(unsigned long vaddr,
 					   unsigned long size, bool enc)
 {
-	unsigned long vaddr_end, vaddr_next;
+	unsigned long vaddr_end, vaddr_next, start;
 	unsigned long psize, pmask;
 	int split_page_size_mask;
 	int level, ret;
 	pte_t *kpte;
 
+	start = vaddr;
 	vaddr_next = vaddr;
 	vaddr_end = vaddr + size;
 
@@ -345,6 +394,8 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
 	ret = 0;
 
+	notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT,
+					enc);
 out:
 	__flush_tlb_all();
 	return ret;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 156cd235659f..9729cb0d99e3 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2020,6 +2020,13 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
+	/*
+	 * Notify hypervisor that a given memory range is mapped encrypted
+	 * or decrypted. The hypervisor will use this information during the
+	 * VM migration.
+	 */
+	notify_range_enc_status_changed(addr, numpages, enc);
+
 	return ret;
 }
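[The diff above adds only the paravirt plumbing and the guest-side page-table walk; the .mmu.notify_page_enc_status_changed hook stays paravirt_nop here, and the KVM guest implementation comes with patch 5 of the series. As a rough illustration of how a guest could wire the hook to a hypercall, consider the sketch below. The names KVM_HC_PAGE_ENC_STATUS and kvm_sev_init_platform() are assumptions for this sketch, not the series' final code; kvm_hypercall3() is the standard guest hypercall helper from asm/kvm_para.h.

/*
 * Illustrative sketch only -- not part of this patch. One plausible way
 * a KVM/SEV guest could implement the new paravirt hook: forward the
 * pfn range and its new encryption state to the host via a hypercall.
 * KVM_HC_PAGE_ENC_STATUS and kvm_sev_init_platform() are hypothetical.
 */
#include <asm/kvm_para.h>
#include <asm/mem_encrypt.h>
#include <asm/paravirt.h>

static void kvm_sev_page_enc_status_changed(unsigned long pfn, int npages,
					    bool enc)
{
	/* Report the pfn range and its new C-bit state to the host. */
	kvm_hypercall3(KVM_HC_PAGE_ENC_STATUS, pfn, npages, enc);
}

static void __init kvm_sev_init_platform(void)
{
	/* Only SEV guests need to report encryption status changes. */
	if (!sev_active())
		return;

	pv_ops.mmu.notify_page_enc_status_changed =
		kvm_sev_page_enc_status_changed;
}

Because the hook defaults to paravirt_nop, bare-metal kernels and non-SEV guests see no behavior change from this patch; only a guest that overrides the op ever issues the notification.]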