@@ -134,6 +134,7 @@ extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
+extern void __weak arch_uprobe_xol_copy(struct arch_uprobe *auprobe, void *vaddr);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
@@ -1238,6 +1238,11 @@ static unsigned long xol_take_insn_slot(struct xol_area *area)
return slot_addr;
}

+void __weak arch_uprobe_xol_copy(struct arch_uprobe *auprobe, void *vaddr)
+{
+ memcpy(vaddr, auprobe->insn, MAX_UINSN_BYTES);
+}
+
/*
* xol_get_insn_slot - allocate a slot for xol.
* Returns the allocated slot address or 0.
@@ -1246,6 +1251,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
struct xol_area *area;
unsigned long xol_vaddr;
+ void *kaddr;

area = get_xol_area();
if (!area)
@@ -1256,7 +1262,9 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
return 0;

/* Initialize the slot */
- copy_to_page(area->page, xol_vaddr, uprobe->arch.insn, MAX_UINSN_BYTES);
+ kaddr = kmap_atomic(area->page);
+ arch_uprobe_xol_copy(&uprobe->arch, kaddr + (xol_vaddr & ~PAGE_MASK));
+ kunmap_atomic(kaddr);
/*
* We probably need flush_icache_user_range() but it needs vma.
* This should work on supported architectures too.
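
The weak default above preserves the old behavior exactly: a straight memcpy of MAX_UINSN_BYTES from the probe's original instruction bytes into the XOL slot. What the hook buys is a seam for architectures whose out-of-line copy is not a plain byte copy. A purely illustrative override might look like the sketch below; the ixol field is an assumption made for the example, not something this patch adds, and an arch would be expected to populate it with a fixed-up instruction at arch_uprobe_analyze_insn() time:

void arch_uprobe_xol_copy(struct arch_uprobe *auprobe, void *vaddr)
{
	/*
	 * Copy an arch-prepared out-of-line version instead of the
	 * original bytes.  "ixol" is hypothetical; the point is only
	 * that the slot contents need not equal auprobe->insn.
	 */
	memcpy(vaddr, auprobe->ixol, MAX_UINSN_BYTES);
}

Note that the caller maps the XOL page with kmap_atomic(), so any override runs in atomic context and must not sleep.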