| Field | Value |
|---|---|
| Message ID | 20241114160131.48616-53-richard.henderson@linaro.org |
| State | New |
| Headers | show |
| Series | accel/tcg: Convert victim tlb to IntervalTree \| expand |
On 11/14/24 08:01, Richard Henderson wrote: > The new tlb_fill_align hook returns page data via structure > rather than by function call, so we can make tlb_set_page_full > be local to cputlb.c. There are no users of tlb_set_page > or tlb_set_page_with_attrs, so those can be eliminated. > > Signed-off-by: Richard Henderson <richard.henderson@linaro.org> > --- > include/exec/exec-all.h | 57 ----------------------------------------- > accel/tcg/cputlb.c | 27 ++----------------- > 2 files changed, 2 insertions(+), 82 deletions(-) > > diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h > index 69bdb77584..b65fc547bd 100644 > --- a/include/exec/exec-all.h > +++ b/include/exec/exec-all.h > @@ -184,63 +184,6 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, > vaddr len, > uint16_t idxmap, > unsigned bits); > - > -/** > - * tlb_set_page_full: > - * @cpu: CPU context > - * @mmu_idx: mmu index of the tlb to modify > - * @addr: virtual address of the entry to add > - * @full: the details of the tlb entry > - * > - * Add an entry to @cpu tlb index @mmu_idx. All of the fields of > - * @full must be filled, except for xlat_section, and constitute > - * the complete description of the translated page. > - * > - * This is generally called by the target tlb_fill function after > - * having performed a successful page table walk to find the physical > - * address and attributes for the translation. > - * > - * At most one entry for a given virtual address is permitted. Only a > - * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only > - * used by tlb_flush_page. 
> - */ > -void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr, > - CPUTLBEntryFull *full); > - > -/** > - * tlb_set_page_with_attrs: > - * @cpu: CPU to add this TLB entry for > - * @addr: virtual address of page to add entry for > - * @paddr: physical address of the page > - * @attrs: memory transaction attributes > - * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits) > - * @mmu_idx: MMU index to insert TLB entry for > - * @size: size of the page in bytes > - * > - * Add an entry to this CPU's TLB (a mapping from virtual address > - * @addr to physical address @paddr) with the specified memory > - * transaction attributes. This is generally called by the target CPU > - * specific code after it has been called through the tlb_fill() > - * entry point and performed a successful page table walk to find > - * the physical address and attributes for the virtual address > - * which provoked the TLB miss. > - * > - * At most one entry for a given virtual address is permitted. Only a > - * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only > - * used by tlb_flush_page. > - */ > -void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, > - hwaddr paddr, MemTxAttrs attrs, > - int prot, int mmu_idx, vaddr size); > -/* tlb_set_page: > - * > - * This function is equivalent to calling tlb_set_page_with_attrs() > - * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided > - * as a convenience for CPUs which don't use memory transaction attributes. 
> - */ > -void tlb_set_page(CPUState *cpu, vaddr addr, > - hwaddr paddr, int prot, > - int mmu_idx, vaddr size); > #else > static inline void tlb_init(CPUState *cpu) > { > diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c > index ec597ed6f5..3d731b8f3d 100644 > --- a/accel/tcg/cputlb.c > +++ b/accel/tcg/cputlb.c > @@ -1037,8 +1037,8 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent, > * Called from TCG-generated code, which is under an RCU read-side > * critical section. > */ > -void tlb_set_page_full(CPUState *cpu, int mmu_idx, > - vaddr addr, CPUTLBEntryFull *full) > +static void tlb_set_page_full(CPUState *cpu, int mmu_idx, > + vaddr addr, CPUTLBEntryFull *full) > { > CPUTLB *tlb = &cpu->neg.tlb; > CPUTLBDesc *desc = &tlb->d[mmu_idx]; > @@ -1189,29 +1189,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, > qemu_spin_unlock(&tlb->c.lock); > } > > -void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, > - hwaddr paddr, MemTxAttrs attrs, int prot, > - int mmu_idx, uint64_t size) > -{ > - CPUTLBEntryFull full = { > - .phys_addr = paddr, > - .attrs = attrs, > - .prot = prot, > - .lg_page_size = ctz64(size) > - }; > - > - assert(is_power_of_2(size)); > - tlb_set_page_full(cpu, mmu_idx, addr, &full); > -} > - > -void tlb_set_page(CPUState *cpu, vaddr addr, > - hwaddr paddr, int prot, > - int mmu_idx, uint64_t size) > -{ > - tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED, > - prot, mmu_idx, size); > -} > - > /* > * Note: tlb_fill_align() can trigger a resize of the TLB. > * This means that all of the caller's prior references to the TLB table Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 69bdb77584..b65fc547bd 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -184,63 +184,6 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr len, uint16_t idxmap, unsigned bits); - -/** - * tlb_set_page_full: - * @cpu: CPU context - * @mmu_idx: mmu index of the tlb to modify - * @addr: virtual address of the entry to add - * @full: the details of the tlb entry - * - * Add an entry to @cpu tlb index @mmu_idx. All of the fields of - * @full must be filled, except for xlat_section, and constitute - * the complete description of the translated page. - * - * This is generally called by the target tlb_fill function after - * having performed a successful page table walk to find the physical - * address and attributes for the translation. - * - * At most one entry for a given virtual address is permitted. Only a - * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only - * used by tlb_flush_page. - */ -void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr, - CPUTLBEntryFull *full); - -/** - * tlb_set_page_with_attrs: - * @cpu: CPU to add this TLB entry for - * @addr: virtual address of page to add entry for - * @paddr: physical address of the page - * @attrs: memory transaction attributes - * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits) - * @mmu_idx: MMU index to insert TLB entry for - * @size: size of the page in bytes - * - * Add an entry to this CPU's TLB (a mapping from virtual address - * @addr to physical address @paddr) with the specified memory - * transaction attributes. This is generally called by the target CPU - * specific code after it has been called through the tlb_fill() - * entry point and performed a successful page table walk to find - * the physical address and attributes for the virtual address - * which provoked the TLB miss. - * - * At most one entry for a given virtual address is permitted. 
Only a - * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only - * used by tlb_flush_page. - */ -void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, - hwaddr paddr, MemTxAttrs attrs, - int prot, int mmu_idx, vaddr size); -/* tlb_set_page: - * - * This function is equivalent to calling tlb_set_page_with_attrs() - * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided - * as a convenience for CPUs which don't use memory transaction attributes. - */ -void tlb_set_page(CPUState *cpu, vaddr addr, - hwaddr paddr, int prot, - int mmu_idx, vaddr size); #else static inline void tlb_init(CPUState *cpu) { diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index ec597ed6f5..3d731b8f3d 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1037,8 +1037,8 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent, * Called from TCG-generated code, which is under an RCU read-side * critical section. */ -void tlb_set_page_full(CPUState *cpu, int mmu_idx, - vaddr addr, CPUTLBEntryFull *full) +static void tlb_set_page_full(CPUState *cpu, int mmu_idx, + vaddr addr, CPUTLBEntryFull *full) { CPUTLB *tlb = &cpu->neg.tlb; CPUTLBDesc *desc = &tlb->d[mmu_idx]; @@ -1189,29 +1189,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, qemu_spin_unlock(&tlb->c.lock); } -void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, - hwaddr paddr, MemTxAttrs attrs, int prot, - int mmu_idx, uint64_t size) -{ - CPUTLBEntryFull full = { - .phys_addr = paddr, - .attrs = attrs, - .prot = prot, - .lg_page_size = ctz64(size) - }; - - assert(is_power_of_2(size)); - tlb_set_page_full(cpu, mmu_idx, addr, &full); -} - -void tlb_set_page(CPUState *cpu, vaddr addr, - hwaddr paddr, int prot, - int mmu_idx, uint64_t size) -{ - tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED, - prot, mmu_idx, size); -} - /* * Note: tlb_fill_align() can trigger a resize of the TLB. * This means that all of the caller's prior references to the TLB table
The new tlb_fill_align hook returns page data via structure rather than by function call, so we can make tlb_set_page_full be local to cputlb.c. There are no users of tlb_set_page or tlb_set_page_with_attrs, so those can be eliminated. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- include/exec/exec-all.h | 57 ----------------------------------------- accel/tcg/cputlb.c | 27 ++----------------- 2 files changed, 2 insertions(+), 82 deletions(-)