@@ -267,3 +267,33 @@ int hv_set_mem_enc(unsigned long addr, int numpages, bool enc)
enc ? VMBUS_PAGE_NOT_VISIBLE
: VMBUS_PAGE_VISIBLE_READ_WRITE);
}
+
+/*
+ * hv_map_memory - map @size bytes at kernel address @addr into the extra
+ * address space above ms_hyperv.shared_gpa_boundary in an AMD SEV-SNP
+ * Isolation VM.  Returns the new virtual address, or 0 on failure.
+ */
+unsigned long hv_map_memory(unsigned long addr, unsigned long size)
+{
+	unsigned long npages = size / HV_HYP_PAGE_SIZE;
+	unsigned long *pfns;
+	unsigned long vaddr;
+	unsigned long i;
+
+	pfns = kcalloc(npages, sizeof(*pfns), GFP_KERNEL);
+	if (!pfns)
+		return 0;
+
+	for (i = 0; i < npages; i++)
+		pfns[i] = virt_to_hvpfn((void *)addr + i * HV_HYP_PAGE_SIZE) +
+			(ms_hyperv.shared_gpa_boundary >> HV_HYP_PAGE_SHIFT);
+
+	vaddr = (unsigned long)vmap_pfn(pfns, npages, PAGE_KERNEL_IO);
+	kfree(pfns);
+	return vaddr;
+}
+
+void hv_unmap_memory(unsigned long addr)
+{
+	vunmap((void *)addr);	/* release the alias created by hv_map_memory() */
+}
@@ -253,6 +253,8 @@ int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
int hv_mark_gpa_visibility(u16 count, const u64 pfn[], u32 visibility);
int hv_set_mem_enc(unsigned long addr, int numpages, bool enc);
+unsigned long hv_map_memory(unsigned long addr, unsigned long size);
+void hv_unmap_memory(unsigned long addr);
void hv_sint_wrmsrl_ghcb(u64 msr, u64 value);
void hv_sint_rdmsrl_ghcb(u64 msr, u64 *value);
void hv_signal_eom_ghcb(void);
@@ -49,6 +49,8 @@ int set_memory_decrypted(unsigned long addr, int numpages);
int set_memory_np_noalias(unsigned long addr, int numpages);
int set_memory_nonglobal(unsigned long addr, int numpages);
int set_memory_global(unsigned long addr, int numpages);
+unsigned long set_memory_decrypted_map(unsigned long addr, unsigned long size);
+int set_memory_encrypted_unmap(unsigned long addr, unsigned long size);
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
@@ -2039,6 +2039,34 @@ int set_memory_decrypted(unsigned long addr, int numpages)
}
EXPORT_SYMBOL_GPL(set_memory_decrypted);
+static unsigned long __map_memory(unsigned long addr, unsigned long size)
+{
+	if (hv_is_isolation_supported())
+		return hv_map_memory(addr, size);	/* remap above shared_gpa_boundary */
+
+	return addr;	/* non-isolation VM: existing mapping is usable as-is */
+}
+
+static void __unmap_memory(unsigned long addr)
+{
+	if (hv_is_isolation_supported())
+		hv_unmap_memory(addr);	/* drop the extra mapping made by __map_memory() */
+}
+
+/*
+ * set_memory_decrypted_map - decrypt @size bytes at @addr and, when the
+ * hypervisor supports isolation VMs, remap them above the shared GPA
+ * boundary.  Returns the virtual address to use for the decrypted
+ * region, or 0 on failure (the region is left encrypted).
+ */
+unsigned long set_memory_decrypted_map(unsigned long addr, unsigned long size)
+{
+	unsigned long vaddr;
+
+	if (__set_memory_enc_dec(addr, size / PAGE_SIZE, false))
+		return 0;
+
+	vaddr = __map_memory(addr, size);
+	if (!vaddr)
+		/* Unwind: don't leave the pages decrypted but unusable. */
+		__set_memory_enc_dec(addr, size / PAGE_SIZE, true);
+
+	return vaddr;
+}
+
+int set_memory_encrypted_unmap(unsigned long addr, unsigned long size)
+{
+	__unmap_memory(addr);	/* unmap first, then restore encryption */
+	return __set_memory_enc_dec(addr, size / PAGE_SIZE, true);	/* 0 on success */
+}
+
int set_pages_uc(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
@@ -72,6 +72,9 @@ extern enum swiotlb_force swiotlb_force;
* @end: The end address of the swiotlb memory pool. Used to do a quick
* range check to see if the memory was in fact allocated by this
* API.
+ * @vstart: The virtual start address of the swiotlb memory pool.  When memory
+ *	    encryption is in use the pool may be remapped; bounce-buffer
+ *	    operations use this address instead of phys_to_virt(@start).
* @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
* @end. For default swiotlb, this is command line adjustable via
* setup_io_tlb_npages.
@@ -89,6 +92,7 @@ extern enum swiotlb_force swiotlb_force;
struct io_tlb_mem {
phys_addr_t start;
phys_addr_t end;
+ void *vstart;
unsigned long nslabs;
unsigned long used;
unsigned int index;
@@ -194,8 +194,13 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
mem->slots[i].alloc_size = 0;
}
- set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
- memset(vaddr, 0, bytes);
+ mem->vstart = (void *)set_memory_decrypted_map((unsigned long)vaddr, bytes);
+ if (!mem->vstart) {
+ pr_err("Failed to decrypt memory.\n");
+ return;
+ }
+
+ memset(mem->vstart, 0, bytes);
}
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
@@ -352,7 +357,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
phys_addr_t orig_addr = mem->slots[index].orig_addr;
size_t alloc_size = mem->slots[index].alloc_size;
unsigned long pfn = PFN_DOWN(orig_addr);
- unsigned char *vaddr = phys_to_virt(tlb_addr);
+ unsigned char *vaddr = mem->vstart + tlb_addr - mem->start;
if (orig_addr == INVALID_PHYS_ADDR)
return;