@@ -100,12 +100,12 @@
typedef struct CPUTLBEntryFull {
/*
* @xlat_section contains:
- * - in the lower TARGET_PAGE_BITS, a physical section number
- * - with the lower TARGET_PAGE_BITS masked off, an offset which
- * must be added to the virtual address to obtain:
- * + the ram_addr_t of the target RAM (if the physical section
- * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
- * + the offset within the target MemoryRegion (otherwise)
+ * - For RAM, an offset which must be added to the virtual address
+ * to obtain the ram_addr_t of the target RAM
+ * - For other memory regions,
+ * + in the lower TARGET_PAGE_BITS, the physical section number
+ * + with the lower TARGET_PAGE_BITS masked off, the offset within
+ * the target MemoryRegion
*/
hwaddr xlat_section;
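A minimal sketch (not from the patch) of how the two encodings described above could be unpacked at access time; it assumes 4 KiB target pages and uses hypothetical stand-ins (PAGE_BITS/PAGE_MASK for TARGET_PAGE_BITS/TARGET_PAGE_MASK, plus locals xlat_section, vaddr and is_ram):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_BITS 12   /* stand-in for TARGET_PAGE_BITS */
    #define PAGE_MASK (~(uint64_t)((1u << PAGE_BITS) - 1))

    static void decode(uint64_t xlat_section, uint64_t vaddr, bool is_ram)
    {
        if (is_ram) {
            /* RAM: the low PAGE_BITS are zero, so adding the full vaddr
             * yields the ram_addr_t of the access directly. */
            uint64_t ram_addr = xlat_section + vaddr;
            (void)ram_addr;
        } else {
            /* Other regions: the low PAGE_BITS carry the physical section
             * number; the high bits plus vaddr give the offset of the
             * access within the MemoryRegion. */
            unsigned section_idx = xlat_section & ~PAGE_MASK;
            uint64_t mr_offset = (xlat_section & PAGE_MASK) + vaddr;
            (void)section_idx;
            (void)mr_offset;
        }
    }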
@@ -1193,6 +1193,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
write_flags = read_flags;
if (is_ram) {
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
+ assert(!(iotlb & ~TARGET_PAGE_MASK));
/*
* Computing is_clean is expensive; avoid all that unless
* the page is actually writable.
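For context (again a sketch, not patch code), the fill-time arithmetic that the new assert guards could be viewed as below; ram_base, xlat and addr_page are hypothetical stand-ins for memory_region_get_ram_addr(section->mr), the offset within the section, and the page-aligned guest virtual address:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_BITS 12   /* stand-in for TARGET_PAGE_BITS */

    static uint64_t store_ram_xlat(uint64_t ram_base, uint64_t xlat,
                                   uint64_t addr_page)
    {
        uint64_t iotlb = ram_base + xlat;    /* ram_addr_t of the page base */
        /* Must be page aligned so the low PAGE_BITS stay free. */
        assert((iotlb & ((1u << PAGE_BITS) - 1)) == 0);
        /* Page-aligned delta kept in xlat_section; adding the access
         * vaddr later restores the full ram_addr_t. */
        return iotlb - addr_page;
    }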
@@ -1255,10 +1256,12 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
/* refill the tlb */
/*
- * At this point iotlb contains a physical section number in the lower
- * TARGET_PAGE_BITS, and either
- * + the ram_addr_t of the page base of the target RAM (RAM)
- * + the offset within section->mr of the page base (I/O, ROMD)
+ * When the memory region is RAM, iotlb contains the ram_addr_t of
+ * the page base of the target RAM, with the lower TARGET_PAGE_BITS
+ * clear.  Otherwise, iotlb contains
+ * - a physical section number in the lower TARGET_PAGE_BITS
+ * - the offset within section->mr of the page base (I/O, ROMD), with
+ * the lower TARGET_PAGE_BITS masked off.
* We subtract addr_page (which is page aligned and thus won't
* disturb the low bits) to give an offset which can be added to the
* (non-page-aligned) vaddr of the eventual memory access to get