Message ID | 20210804184513.512888-12-ltykernel@gmail.com |
---|---|
State | Superseded |
Series | x86/Hyper-V: Add Hyper-V Isolation VM support |
Hi Konrad,

Could you have a look at this new version? The change since v1 is to make
swiotlb_init_io_tlb_mem() return an error code when dma_map_decrypted()
fails, per your previous comment. If this change is OK, could you give your
Ack? This series needs to be merged via the Hyper-V next tree.

Thanks.

On 8/5/2021 2:45 AM, Tianyu Lan wrote:
> From: Tianyu Lan <Tianyu.Lan@microsoft.com>
>
> In an Isolation VM with AMD SEV, the bounce buffer needs to be accessed
> via an extra address space above shared_gpa_boundary (e.g. the 39-bit
> address line) reported by the Hyper-V CPUID ISOLATION_CONFIG leaf. The
> physical address used for access is the original physical address plus
> shared_gpa_boundary. shared_gpa_boundary is called the virtual top of
> memory (vTOM) in the AMD SEV-SNP spec. Memory addresses below vTOM are
> automatically treated as private, while memory above vTOM is treated
> as shared.
>
> Use dma_map_decrypted() in the swiotlb code, store the remap address it
> returns, and use that remap address to copy data from/to the swiotlb
> bounce buffer.
>
> Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
> ---
> Change since v1:
>        * Make swiotlb_init_io_tlb_mem() return an error code and
>          return an error when dma_map_decrypted() fails.
>
> Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
> ---
>  include/linux/swiotlb.h |  4 ++++
>  kernel/dma/swiotlb.c    | 32 ++++++++++++++++++++++++--------
>  2 files changed, 28 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index f507e3eacbea..584560ecaa8e 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -72,6 +72,9 @@ extern enum swiotlb_force swiotlb_force;
>   * @end:    The end address of the swiotlb memory pool. Used to do a quick
>   *          range check to see if the memory was in fact allocated by this
>   *          API.
> + * @vaddr:  The virtual address of the swiotlb memory pool. The pool may
> + *          be remapped when memory encryption is in use; this stores the
> + *          virtual address used for bounce buffer operations.
>   * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
>   *          @end. For default swiotlb, this is command line adjustable via
>   *          setup_io_tlb_npages.
> @@ -89,6 +92,7 @@ extern enum swiotlb_force swiotlb_force;
>  struct io_tlb_mem {
>          phys_addr_t start;
>          phys_addr_t end;
> +        void *vaddr;
>          unsigned long nslabs;
>          unsigned long used;
>          unsigned int index;
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 1fa81c096c1d..29b6d888ef3b 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -176,7 +176,7 @@ void __init swiotlb_update_mem_attributes(void)
>          memset(vaddr, 0, bytes);
>  }
>
> -static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
> +static int swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
>                                      unsigned long nslabs, bool late_alloc)
>  {
>          void *vaddr = phys_to_virt(start);
> @@ -194,14 +194,21 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
>                  mem->slots[i].alloc_size = 0;
>          }
>
> -        set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
> -        memset(vaddr, 0, bytes);
> +        mem->vaddr = dma_map_decrypted(vaddr, bytes);
> +        if (!mem->vaddr) {
> +                pr_err("Failed to decrypt memory.\n");
> +                return -ENOMEM;
> +        }
> +
> +        memset(mem->vaddr, 0, bytes);
> +        return 0;
>  }
>
>  int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
>  {
>          struct io_tlb_mem *mem;
>          size_t alloc_size;
> +        int ret;
>
>          if (swiotlb_force == SWIOTLB_NO_FORCE)
>                  return 0;
> @@ -216,7 +223,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
>                  panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
>                        __func__, alloc_size, PAGE_SIZE);
>
> -        swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
> +        ret = swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
> +        if (ret) {
> +                memblock_free(__pa(mem), alloc_size);
> +                return ret;
> +        }
>
>          io_tlb_default_mem = mem;
>          if (verbose)
> @@ -304,6 +315,8 @@ int
>  swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
>  {
>          struct io_tlb_mem *mem;
> +        int size = get_order(struct_size(mem, slots, nslabs));
> +        int ret;
>
>          if (swiotlb_force == SWIOTLB_NO_FORCE)
>                  return 0;
> @@ -312,12 +325,15 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
>          if (WARN_ON_ONCE(io_tlb_default_mem))
>                  return -ENOMEM;
>
> -        mem = (void *)__get_free_pages(GFP_KERNEL,
> -                                       get_order(struct_size(mem, slots, nslabs)));
> +        mem = (void *)__get_free_pages(GFP_KERNEL, size);
>          if (!mem)
>                  return -ENOMEM;
>
> -        swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
> +        ret = swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
> +        if (ret) {
> +                free_pages((unsigned long)mem, size);
> +                return ret;
> +        }
>
>          io_tlb_default_mem = mem;
>          swiotlb_print_info();
> @@ -360,7 +376,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
>          phys_addr_t orig_addr = mem->slots[index].orig_addr;
>          size_t alloc_size = mem->slots[index].alloc_size;
>          unsigned long pfn = PFN_DOWN(orig_addr);
> -        unsigned char *vaddr = phys_to_virt(tlb_addr);
> +        unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
>          unsigned int tlb_offset;
>
>          if (orig_addr == INVALID_PHYS_ADDR)
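To make the vTOM arithmetic from the commit message concrete: the shared
(decrypted) alias of a page is reached by adding shared_gpa_boundary to its
physical address. A minimal sketch, assuming a shared_gpa_boundary value
obtained from the ISOLATION_CONFIG CPUID leaf (the helper name below is
hypothetical and not part of this patch):

#include <linux/types.h>

/*
 * Hypothetical illustration only: addresses below vTOM are private;
 * adding the boundary yields the shared alias of the same page.
 */
static inline phys_addr_t shared_gpa(phys_addr_t pa, u64 shared_gpa_boundary)
{
        return pa + shared_gpa_boundary;
}

For a 39-bit boundary, the bounce buffer page at physical address 0x1000
would be accessed through the hypervisor-visible address (1ULL << 39) + 0x1000.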
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index f507e3eacbea..584560ecaa8e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -72,6 +72,9 @@ extern enum swiotlb_force swiotlb_force;
  * @end:    The end address of the swiotlb memory pool. Used to do a quick
  *          range check to see if the memory was in fact allocated by this
  *          API.
+ * @vaddr:  The virtual address of the swiotlb memory pool. The pool may
+ *          be remapped when memory encryption is in use; this stores the
+ *          virtual address used for bounce buffer operations.
  * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
  *          @end. For default swiotlb, this is command line adjustable via
  *          setup_io_tlb_npages.
@@ -89,6 +92,7 @@ extern enum swiotlb_force swiotlb_force;
 struct io_tlb_mem {
         phys_addr_t start;
         phys_addr_t end;
+        void *vaddr;
         unsigned long nslabs;
         unsigned long used;
         unsigned int index;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 1fa81c096c1d..29b6d888ef3b 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -176,7 +176,7 @@ void __init swiotlb_update_mem_attributes(void)
         memset(vaddr, 0, bytes);
 }

-static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
+static int swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
                                     unsigned long nslabs, bool late_alloc)
 {
         void *vaddr = phys_to_virt(start);
@@ -194,14 +194,21 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
                 mem->slots[i].alloc_size = 0;
         }

-        set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-        memset(vaddr, 0, bytes);
+        mem->vaddr = dma_map_decrypted(vaddr, bytes);
+        if (!mem->vaddr) {
+                pr_err("Failed to decrypt memory.\n");
+                return -ENOMEM;
+        }
+
+        memset(mem->vaddr, 0, bytes);
+        return 0;
 }

 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
         struct io_tlb_mem *mem;
         size_t alloc_size;
+        int ret;

         if (swiotlb_force == SWIOTLB_NO_FORCE)
                 return 0;
@@ -216,7 +223,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
                 panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
                       __func__, alloc_size, PAGE_SIZE);

-        swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
+        ret = swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
+        if (ret) {
+                memblock_free(__pa(mem), alloc_size);
+                return ret;
+        }

         io_tlb_default_mem = mem;
         if (verbose)
@@ -304,6 +315,8 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
         struct io_tlb_mem *mem;
+        int size = get_order(struct_size(mem, slots, nslabs));
+        int ret;

         if (swiotlb_force == SWIOTLB_NO_FORCE)
                 return 0;
@@ -312,12 +325,15 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
         if (WARN_ON_ONCE(io_tlb_default_mem))
                 return -ENOMEM;

-        mem = (void *)__get_free_pages(GFP_KERNEL,
-                                       get_order(struct_size(mem, slots, nslabs)));
+        mem = (void *)__get_free_pages(GFP_KERNEL, size);
         if (!mem)
                 return -ENOMEM;

-        swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
+        ret = swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
+        if (ret) {
+                free_pages((unsigned long)mem, size);
+                return ret;
+        }

         io_tlb_default_mem = mem;
         swiotlb_print_info();
@@ -360,7 +376,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
         phys_addr_t orig_addr = mem->slots[index].orig_addr;
         size_t alloc_size = mem->slots[index].alloc_size;
         unsigned long pfn = PFN_DOWN(orig_addr);
-        unsigned char *vaddr = phys_to_virt(tlb_addr);
+        unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
         unsigned int tlb_offset;

         if (orig_addr == INVALID_PHYS_ADDR)
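dma_map_decrypted() itself is introduced earlier in this series; the patch
above only stores and consumes its return value. As a rough sketch of what
such a remap can look like on a vTOM platform (an illustrative assumption
with invented names, not the implementation from this series), the decrypted
alias can be built by remapping the pool's page frames offset above the
boundary with vmap_pfn():

#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Rough sketch only -- not the dma_map_decrypted() from this series.
 * Remap a page-aligned, @bytes-long buffer at @vaddr so that accesses
 * go through physical addresses above the shared GPA boundary (vTOM).
 * PAGE_KERNEL_NOENC is the x86 "unencrypted" protection.
 */
static void *vtom_map_decrypted(void *vaddr, unsigned long bytes,
                                u64 shared_gpa_boundary)
{
        unsigned int npages = bytes >> PAGE_SHIFT;
        unsigned long *pfns;
        unsigned int i;
        void *remap;

        pfns = kcalloc(npages, sizeof(*pfns), GFP_KERNEL);
        if (!pfns)
                return NULL;

        /* Shift each PFN above vTOM so the pages are treated as shared. */
        for (i = 0; i < npages; i++)
                pfns[i] = PHYS_PFN(__pa(vaddr) + i * PAGE_SIZE) +
                          PHYS_PFN(shared_gpa_boundary);

        remap = vmap_pfn(pfns, npages, PAGE_KERNEL_NOENC);
        kfree(pfns);
        return remap;
}

With such a remap in place, swiotlb_bounce() can no longer use
phys_to_virt(tlb_addr), because the CPU must touch the shared alias rather
than the original mapping; that is why the patch computes the bounce address
as mem->vaddr + tlb_addr - mem->start.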