@@ -34,6 +34,9 @@
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
+#ifdef CONFIG_PLUGIN
+#include "qemu/plugin-memory.h"
+#endif
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -1247,6 +1250,45 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
return (void *)((uintptr_t)addr + entry->addend);
}
+
+#ifdef CONFIG_PLUGIN
+/*
+ * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
+ * This is on a hot path: we will have just looked this address up
+ * in the softmmu lookup code (or helper). We don't handle re-fills or
+ * checking the victim table. This is purely informational.
+ *
+ * This should never fail as the memory access being instrumented
+ * should have just filled the TLB.
+ */
+
+bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+ bool is_store, struct qemu_plugin_hwaddr *data)
+{
+ CPUArchState *env = cpu->env_ptr;
+ CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
+
+ if (likely(tlb_hit(tlb_addr, addr))) {
+ /* We must have an iotlb entry for MMIO */
+ if (tlb_addr & TLB_MMIO) {
+ CPUIOTLBEntry *iotlbentry;
+ iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+ data->is_io = true;
+ data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+ data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+ } else {
+ data->is_io = false;
+ data->v.ram.hostaddr = addr + tlbe->addend;
+ }
+ return true;
+ }
+ return false;
+}
+
+#endif
+
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
* operations, or io operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
new file mode 100644
@@ -0,0 +1,40 @@
+/*
+ * Plugin Memory API
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PLUGIN_MEMORY_H
+#define PLUGIN_MEMORY_H
+
+struct qemu_plugin_hwaddr {
+ bool is_io;
+ bool is_store;
+ union {
+ struct {
+ MemoryRegionSection *section;
+ hwaddr offset;
+ } io;
+ struct {
+ uint64_t hostaddr;
+ } ram;
+ } v;
+};
+
+/**
+ * tlb_plugin_lookup: query last TLB lookup
+ * @cpu: cpu performing the access
+ *
+ * This function can be used directly after a memory operation to
+ * query information about the access. It is used by the plugin
+ * infrastructure to expose more information about the address.
+ *
+ * It would only fail if not called from an instrumented memory access,
+ * which would be an abuse of the API.
+ */
+bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+ bool is_store, struct qemu_plugin_hwaddr *data);
+
+#endif /* PLUGIN_MEMORY_H */
@@ -285,6 +285,14 @@ bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info);
struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
uint64_t vaddr);
+/*
+ * The following additional queries can be run on the hwaddr structure
+ * to return information about it. For non-IO accesses the "device
+ * offset" is an offset into the block of RAM backing the access.
+ */
+bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr);
+uint64_t qemu_plugin_hwaddr_device_offset(const struct qemu_plugin_hwaddr *haddr);
+
typedef void
(*qemu_plugin_vcpu_mem_cb_t)(unsigned int vcpu_index,
qemu_plugin_meminfo_t info, uint64_t vaddr,
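
To make the intended use of the two new queries concrete, here is a minimal
plugin-side sketch (not part of the patch): a memory callback with the
qemu_plugin_vcpu_mem_cb_t signature shown above that classifies each access.
The counters and the stderr logging are invented for the example; on
user-mode builds qemu_plugin_get_hwaddr() returns NULL, so the callback has
to tolerate that.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <qemu-plugin.h>

    /* illustrative only; unsynchronised across vCPU threads for brevity */
    static uint64_t io_count, ram_count;

    static void vcpu_mem_cb(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                            uint64_t vaddr, void *udata)
    {
        struct qemu_plugin_hwaddr *hwaddr = qemu_plugin_get_hwaddr(info, vaddr);

        if (!hwaddr) {
            /* user-mode build (no softmmu) or called outside a mem callback */
            return;
        }

        if (qemu_plugin_hwaddr_is_io(hwaddr)) {
            /* the offset is relative to the device's memory region */
            fprintf(stderr, "io %s at 0x%" PRIx64 "\n",
                    qemu_plugin_mem_is_store(info) ? "write" : "read",
                    qemu_plugin_hwaddr_device_offset(hwaddr));
            io_count++;
        } else {
            /* for RAM the offset is a ram_addr_t into guest memory */
            ram_count++;
        }
    }

Note that the structure returned by qemu_plugin_get_hwaddr() lives in
thread-local storage (see hwaddr_info further down in this patch), so copy
anything you need out of it before the callback returns.
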
@@ -42,6 +42,7 @@
#include "trace/mem-internal.h" /* mem_info macros */
#include "plugin.h"
#ifndef CONFIG_USER_ONLY
+#include "qemu/plugin-memory.h"
#include "hw/boards.h"
#endif
@@ -240,11 +241,59 @@ bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
* Virtual Memory queries
*/
+#ifdef CONFIG_SOFTMMU
+static __thread struct qemu_plugin_hwaddr hwaddr_info;
+
+struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
+ uint64_t vaddr)
+{
+ CPUState *cpu = current_cpu;
+ unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT;
+ hwaddr_info.is_store = info & TRACE_MEM_ST;
+
+ if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
+ info & TRACE_MEM_ST, &hwaddr_info)) {
+ error_report("invalid use of qemu_plugin_get_hwaddr");
+ return NULL;
+ }
+
+ return &hwaddr_info;
+}
+#else
struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
uint64_t vaddr)
{
return NULL;
}
+#endif
+
+bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr)
+{
+#ifdef CONFIG_SOFTMMU
+ return hwaddr->is_io;
+#else
+ return false;
+#endif
+}
+
+uint64_t qemu_plugin_hwaddr_device_offset(const struct qemu_plugin_hwaddr *haddr)
+{
+#ifdef CONFIG_SOFTMMU
+ if (haddr) {
+ if (!haddr->is_io) {
+ ram_addr_t ram_addr = qemu_ram_addr_from_host((void *) haddr->v.ram.hostaddr);
+ if (ram_addr == RAM_ADDR_INVALID) {
+ error_report("Bad ram pointer %"PRIx64"", haddr->v.ram.hostaddr);
+ abort();
+ }
+ return ram_addr;
+ } else {
+ return haddr->v.io.offset;
+ }
+ }
+#endif
+ return 0;
+}
/*
* Queries to the number and potential maximum number of vCPUs there
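
For completeness, a hedged sketch of how such a callback gets wired up at
translation time. It relies on the pre-existing plugin registration API
(qemu_plugin_register_vcpu_tb_trans_cb, qemu_plugin_tb_n_insns,
qemu_plugin_tb_get_insn, qemu_plugin_register_vcpu_mem_cb,
qemu_plugin_install), none of which is touched by this patch, and assumes
the vcpu_mem_cb from the earlier sketch.

    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t n_insns = qemu_plugin_tb_n_insns(tb);

        for (size_t i = 0; i < n_insns; i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
            /* instrument loads and stores; no register state is needed */
            qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_cb,
                                             QEMU_PLUGIN_CB_NO_REGS,
                                             QEMU_PLUGIN_MEM_RW, NULL);
        }
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        return 0;
    }
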