[Xen-devel,RFC,10/16] xen/arm: Introduce alternative runtime patching

Message ID 1462466065-30212-11-git-send-email-julien.grall@arm.com
State Superseded

Commit Message

Julien Grall May 5, 2016, 4:34 p.m. UTC
Some processor errata will require modifying code sequences.
As those modifications may impact performance, they should only
be enabled on affected cores. Furthermore, Xen may also want to take
advantage of new hardware features coming with ARMv8.1 and ARMv8.2.

This patch adds an infrastructure to patch Xen at boot time
depending on the "features" available on the platform.

This code is based on the file arch/arm64/kernel/alternative.c in
Linux 4.6-rc3. Any references to arm64 have been dropped to make the
code as generic as possible.

Furthermore, in Xen the executable sections (.text and .init.text)
are always mapped read-only, which is enforced by SCTLR.WNX. So it is
not possible to patch Xen directly.

To bypass this restriction, a temporary writable mapping is created with
vmap. This mapping will be used to write the new instructions.

Lastly, runtime patching is currently not necessary for ARM32. So the
code is only enabled for ARM64.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
 xen/arch/arm/Kconfig              |   5 +
 xen/arch/arm/Makefile             |   1 +
 xen/arch/arm/alternative.c        | 217 ++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/setup.c              |   8 ++
 xen/arch/arm/xen.lds.S            |   7 ++
 xen/include/asm-arm/alternative.h | 165 +++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/insn.h  |  16 +++
 7 files changed, 419 insertions(+)
 create mode 100644 xen/arch/arm/alternative.c
 create mode 100644 xen/include/asm-arm/alternative.h

Comments

Julien Grall May 14, 2016, 6:02 p.m. UTC | #1
Hi Konrad,

On 13/05/2016 21:26, Konrad Rzeszutek Wilk wrote:
> On Thu, May 05, 2016 at 05:34:19PM +0100, Julien Grall wrote:
>> Some of the processor erratum will require to modify code sequence.
>> As those modifications may impact the performance, they should only
>> be enabled on affected cores. Furthermore, Xen may also want to take
>> advantage of new hardware features coming up with v8.1 and v8.2.
>>
>> This patch adds an infrastructure to patch Xen during boot time
>> depending on the "features" available on the platform.
>>
>> This code is based on the file arch/arm64/kernel/alternative.c in
>> Linux 4.6-rc3. Any references to arm64 have been dropped to make the
>> code as generic as possible.
>>
>> Furthermore, in Xen the executable sections (.text and .init.text)
>> are always mapped read-only and enforced by SCTLR.WNX. So it is not
>> possible to directly patch Xen.
>
> Is it not possible to temporarily 'unenforce' the SCTLR.WNX?

The problem is not SCTLR.WNX itself, but the fact that the entries are 
changed from read-write to read-only afterwards.

The ARM ARM (see D4-1732 in ARM DDI 0487A.i) recommends the use of 
break-before-make if either the old or the new entry is writable when the 
page table is shared between multiple processors. This is to avoid 
possible TLB conflicts on platforms with aggressive prefetchers.

The break-before-make approach requires the following steps:
    - Replace the old entry with an invalid entry
    - Invalidate the entry by flushing the TLBs
    - Write the new entry
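
For illustration, a minimal C sketch of that sequence on AArch64 (the
barrier and TLB-maintenance instructions below are only illustrative
stand-ins, not the exact helpers Xen uses):

#include <stdint.h>

/*
 * Illustrative break-before-make update of a page-table entry that is
 * shared between processors.
 */
static inline void bbm_update_entry(volatile uint64_t *entry,
                                    uint64_t new_val, unsigned long va)
{
    /* 1. Replace the old entry with an invalid one. */
    *entry = 0;
    asm volatile("dsb ishst" ::: "memory");

    /* 2. Invalidate the entry by flushing the TLBs for that address. */
    asm volatile("tlbi vae2is, %0; dsb ish; isb" : : "r" (va >> 12) : "memory");

    /* 3. Write the new entry. */
    *entry = new_val;
    asm volatile("dsb ishst" ::: "memory");
}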

However, with the current approach, the secondary CPUs run at the same 
time, so even with the break-before-make approach it might be possible 
to hit a data abort if a CPU executes code mapped by an entry that is 
currently being changed.

It would be possible to make the CPUs spin outside of the executable 
code, but I think this would make the code more complicated. Note that 
you would have the same issue with XSplice.

You may wonder why Xen is patched when all the CPUs are online. This is 
because in a big.LITTLE environment, big and little cores will have 
different errata. The only way to know the full list of errata is to 
probe each CPU, and therefore to bring them all up.

I hope this gives you more insight into why I chose this approach. I can 
update the commit message with it.

>
>>
>> To by-pass this restriction, a temporary writeable mapping is made with
>> vmap. This mapping will be used to write the new instructions.
>>
>> Lastly, runtime patching is currently not necessary for ARM32. So the
>> code is only enabled for ARM64.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>> ---
>>   xen/arch/arm/Kconfig              |   5 +
>>   xen/arch/arm/Makefile             |   1 +
>>   xen/arch/arm/alternative.c        | 217 ++++++++++++++++++++++++++++++++++++++
>>   xen/arch/arm/setup.c              |   8 ++
>>   xen/arch/arm/xen.lds.S            |   7 ++
>>   xen/include/asm-arm/alternative.h | 165 +++++++++++++++++++++++++++++
>>   xen/include/asm-arm/arm64/insn.h  |  16 +++
>>   7 files changed, 419 insertions(+)
>>   create mode 100644 xen/arch/arm/alternative.c
>>   create mode 100644 xen/include/asm-arm/alternative.h
>>
>> diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
>> index 6231cd5..9b3e66b 100644
>> --- a/xen/arch/arm/Kconfig
>> +++ b/xen/arch/arm/Kconfig
>> @@ -14,6 +14,7 @@ config ARM_64
>>   	def_bool y
>>   	depends on 64BIT
>>   	select HAS_GICV3
>> +    select ALTERNATIVE
>
> Hard tabs, not spaces.

Right, I will change it in the next version.

>>
>>   config ARM
>>   	def_bool y
>> @@ -46,6 +47,10 @@ config ACPI
>>   config HAS_GICV3
>>   	bool
>>
>> +# Select ALTERNATIVE if the architecture supports runtime patching
>> +config ALTERNATIVE
>> +    bool
>
> Ditto.

Ok.

>> +
>>   endmenu
>>
>>   source "common/Kconfig"
>> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
>> index 9122f78..2d330aa 100644
>> --- a/xen/arch/arm/Makefile
>> +++ b/xen/arch/arm/Makefile
>> @@ -4,6 +4,7 @@ subdir-y += platforms
>>   subdir-$(CONFIG_ARM_64) += efi
>>   subdir-$(CONFIG_ACPI) += acpi
>>
>> +obj-$(CONFIG_ALTERNATIVE) += alternative.o
>
> This probably needs to be alternative.init.o ?
>>   obj-y += bootfdt.o
>>   obj-y += cpu.o
>>   obj-y += cpufeature.o
>> diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c
>> new file mode 100644
>> index 0000000..0a5d1c8
>> --- /dev/null
>> +++ b/xen/arch/arm/alternative.c
>> @@ -0,0 +1,217 @@
>> +/*
>> + * alternative runtime patching
>> + * inspired by the x86 version
>> + *
>> + * Copyright (C) 2014 ARM Ltd.
>
> Not 2016?

The code has been taken from Linux. I was not sure what to do here. I 
will update with 2014-2016.

>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include <xen/config.h>
>> +#include <xen/init.h>
>> +#include <xen/types.h>
>> +#include <xen/kernel.h>
>> +#include <xen/mm.h>
>> +#include <xen/vmap.h>
>> +#include <xen/smp.h>
>> +#include <xen/stop_machine.h>
>> +#include <asm/alternative.h>
>> +#include <asm/atomic.h>
>> +#include <asm/byteorder.h>
>> +#include <asm/cpufeature.h>
>> +#include <asm/insn.h>
>> +#include <asm/page.h>
>> +
>> +#define __ALT_PTR(a,f)      (u32 *)((void *)&(a)->f + (a)->f)
>> +#define ALT_ORIG_PTR(a)     __ALT_PTR(a, orig_offset)
>> +#define ALT_REPL_PTR(a)     __ALT_PTR(a, alt_offset)
>> +
>> +extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];
>> +
>> +struct alt_region {
>> +    const struct alt_instr *begin;
>> +    const struct alt_instr *end;
>> +};
>> +
>> +/*
>> + * Check if the target PC is within an alternative block.
>> + */
>> +static bool_t branch_insn_requires_update(const struct alt_instr *alt,
>
> __init?

No, xSplice will likely contain some alternatives, which Xen will need 
to apply.

I have in mind invasive errata such as the ones implemented in patches #13 
and #14. Furthermore, new ARMv8.1 features will require us to add 
alternatives in the spinlock and atomic helpers.
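
As a purely illustrative example of how such a workaround would use the
ALTERNATIVE() macro added by this patch (the capability bit below is
made up, not one of the real cpufeature bits):

#include <asm/alternative.h>

#define HYPOTHETICAL_ERRATUM_CAP 0   /* stand-in for a real cpufeature bit */

/*
 * On cores that have the (made-up) capability set, the NOP is replaced
 * at boot by an ISB; unaffected cores keep executing the NOP. Both
 * encodings are 4 bytes, as the infrastructure requires.
 */
static inline void erratum_barrier(void)
{
    asm volatile(ALTERNATIVE("nop", "isb", HYPOTHETICAL_ERRATUM_CAP)
                 : : : "memory");
}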

>> +{
>> +    static int patched = 0;
>> +    struct alt_region region = {
>
> const ?

sure.

>> +        .begin = __alt_instructions,
>> +        .end = __alt_instructions_end,
>> +    };
>> +
>> +    /* We always have a CPU 0 at this point (__init) */
>> +    if ( smp_processor_id() )
>> +    {
>> +        while ( !read_atomic(&patched) )
>> +            cpu_relax();
>> +        isb();
>> +    }
>> +    else
>> +    {
>> +        int ret;
>> +
>> +        BUG_ON(patched);
>> +        ret = __apply_alternatives(&region);
>> +        /* The patching is not expected to fail during boot. */
>> +        BUG_ON(ret != 0);
>> +
>> +        /* Barriers provided by the cache flushing */
>> +        write_atomic(&patched, 1);
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>> +void __init apply_alternatives_all(void)
>> +{
>> +    int ret;
>> +
>> +	/* better not try code patching on a live SMP system */
>> +    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);
>
> NR_CPUS? Not 'num_online_cpus' ?

The 3rd argument of stop_machine_run is the CPU where the callback will 
be executed. If NR_CPUS is passed, the callback will be executed on all 
the CPUs.
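
To make that concrete, a small sketch (with the same includes as
alternative.c in this patch; the wrapper function name is made up):

static void __init patching_rendezvous_example(void)
{
    int ret;

    /*
     * NR_CPUS as the 3rd argument: the callback runs on every online CPU.
     * Passing a CPU number instead (e.g. 0) would run it on that CPU only,
     * with the other CPUs merely held in the stop_machine wait state.
     */
    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);
    BUG_ON(ret);
}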

>
> And I am bit confused. The comment says that 'stop_machine' state machine
> may be patched, but here you use the stop_machine_run which is part of
> stop_machine?!

Yes, hence the callback __apply_alternatives_multi_stop is executed on 
every online CPU. CPU0 will patch the code whilst the others spin until 
the code has been patched.

>
>> +
>> +    /* stop_machine_run should never fail at this stage of the boot */
>> +    BUG_ON(ret);
>> +}
>> +
>> +int apply_alternatives(void *start, size_t length)
>
> const void *start?

Could be.

>> +{
>
> const ?

Yes.

>> +    struct alt_region region = {
>> +        .begin = start,
>> +        .end = start + length,
>> +    };
>> +
>> +    return __apply_alternatives(&region);
>> +}
>> +
>> +/*
>> + * Local variables:
>> + * mode: C
>> + * c-file-style: "BSD"
>> + * c-basic-offset: 4
>> + * indent-tabs-mode: nil
>> + * End:
>> + */
>> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
>> index 09ff1ea..c4389ef 100644
>> --- a/xen/arch/arm/setup.c
>> +++ b/xen/arch/arm/setup.c
>> @@ -38,6 +38,7 @@
>>   #include <xen/vmap.h>
>>   #include <xen/libfdt/libfdt.h>
>>   #include <xen/acpi.h>
>> +#include <asm/alternative.h>
>>   #include <asm/page.h>
>>   #include <asm/current.h>
>>   #include <asm/setup.h>
>> @@ -834,6 +835,13 @@ void __init start_xen(unsigned long boot_phys_offset,
>>
>>       do_initcalls();
>>
>> +    /*
>> +     * It needs to be called after do_initcalls to be able to use
>> +     * stop_machine (tasklets initialized via an initcall).
>> +     * XXX: Is it too late?
>
> What else is going to run in initcalls? Why can't this patching be done
> when you only have one CPU?

See my answer above. Xen needs to know the list of errata that affect 
each CPU before patching.

Regards,
Julien Grall May 23, 2016, 11:11 a.m. UTC | #2
Hi Stefano,

On 21/05/16 16:09, Stefano Stabellini wrote:
> On Thu, 5 May 2016, Julien Grall wrote:
>> +void __init apply_alternatives_all(void)
>> +{
>> +    int ret;
>> +
>> +	/* better not try code patching on a live SMP system */
>> +    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);
>
> Why not just call stop_machine_run, passing 0 as the cpu to run
> __apply_alternatives_multi_stop on? Given that you already have
> secondary cpus spinning in __apply_alternatives_multi_stop.  What am I
> missing?

Someone has to call __apply_alternatives_multi_stop on the secondary CPUs. 
apply_alternatives_all is called by the boot CPU, while the rest are 
spinning within the idle_loop.

The function stop_machine_run will stop all the CPUs, and call 
__apply_alternatives_multi_stop on each of them.

Regards,
Julien Grall May 30, 2016, 4:42 p.m. UTC | #3
Hi Stefano,

On 30/05/2016 15:45, Stefano Stabellini wrote:
> On Mon, 23 May 2016, Julien Grall wrote:
>> Hi Stefano,
>>
>> On 21/05/16 16:09, Stefano Stabellini wrote:
>>> On Thu, 5 May 2016, Julien Grall wrote:
>>>> +void __init apply_alternatives_all(void)
>>>> +{
>>>> +    int ret;
>>>> +
>>>> +	/* better not try code patching on a live SMP system */
>>>> +    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL,
>>>> NR_CPUS);
>>>
>>> Why not just call stop_machine_run, passing 0 as the cpu to run
>>> __apply_alternatives_multi_stop on? Given that you already have
>>> secondary cpus spinning in __apply_alternatives_multi_stop.  What am I
>>> missing?
>>
>> Someone as to call __apply_alternatives_multi_stop on secondary CPUs.
>
> Why? Secondary cpus would be just left spinning at the beginning of the
> function. You might as well leave them spinning in
> stopmachine_wait_state.
>

Because we may need to patch the stop_machine state machine 
(spinlocks, ...), the safest place for the CPUs to spin whilst the code 
is patched is __apply_alternatives_multi_stop.

Note that there is a comment on top of __apply_alternatives_multi_stop 
to explain the reason.

Regards,
Julien Grall May 31, 2016, 10:24 a.m. UTC | #4
Hi Stefano,

On 31/05/16 10:21, Stefano Stabellini wrote:
> On Mon, 30 May 2016, Julien Grall wrote:
>> Hi Stefano,
>>
>> On 30/05/2016 15:45, Stefano Stabellini wrote:
>>> On Mon, 23 May 2016, Julien Grall wrote:
>>>> Hi Stefano,
>>>>
>>>> On 21/05/16 16:09, Stefano Stabellini wrote:
>>>>> On Thu, 5 May 2016, Julien Grall wrote:
>>>>>> +void __init apply_alternatives_all(void)
>>>>>> +{
>>>>>> +    int ret;
>>>>>> +
>>>>>> +	/* better not try code patching on a live SMP system */
>>>>>> +    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL,
>>>>>> NR_CPUS);
>>>>>
>>>>> Why not just call stop_machine_run, passing 0 as the cpu to run
>>>>> __apply_alternatives_multi_stop on? Given that you already have
>>>>> secondary cpus spinning in __apply_alternatives_multi_stop.  What am I
>>>>> missing?
>>>>
>>>> Someone as to call __apply_alternatives_multi_stop on secondary CPUs.
>>>
>>> Why? Secondary cpus would be just left spinning at the beginning of the
>>> function. You might as well leave them spinning in
>>> stopmachine_wait_state.
>>>
>>
>> Because, we may need to patch the stop_machine state machine (spinlock,...).
>> So the safest place whilst the code is patched is
>> __apply_alternatives_multi_stop.
>>
>> Note that there is a comment on top of __apply_alternatives_multi_stop to
>> explain the reason.
>
> Have you tried patching stop_machine? What if you need to patch
> __apply_alternatives_multi_stop? :-)

We have to define a safe place where the CPUs can spin, i.e. one whose 
instructions will not be patched.
Whilst stop_machine may not be patched today, it is common code and we 
don't know how it will evolve in the future.

By spinning in __apply_alternatives_multi_stop we protect ourselves 
against modifications in the common code and tricky bugs (yes, they 
might be difficult to hit and debug).

Regards,
Julien Grall June 2, 2016, 3:04 p.m. UTC | #5
Hi Konrad,

On 02/06/16 15:46, Konrad Rzeszutek Wilk wrote:
> On Tue, May 31, 2016 at 11:24:10AM +0100, Julien Grall wrote:
>> On 31/05/16 10:21, Stefano Stabellini wrote:
>>> On Mon, 30 May 2016, Julien Grall wrote:
>> By spinning in __apply_alternatives_multi_stop we protect us against
>> modification in the common code and tricky bug (yes it might be difficult to
>> hit and debug).
>
> I feel that you are moving down the stack to try to make the
> impact of doing modifications have no impact (or as low as possible).
>
> And it would work now, but I am concerned that in the future it may be
> not soo future proof.

Can you give more details here?

> Would it perhaps make sense to make some of the livepatching mechanism
> be exposed as general code? And use it for alternative asm as well?

The code to sync the CPUs looks very similar to stop_machine, so why 
would we want yet another mechanism to sync the CPUs and execute 
a specific function?

Regards,
Julien Grall June 2, 2016, 3:14 p.m. UTC | #6
On 02/06/16 16:04, Julien Grall wrote:
> Hi Konrad,
>
> On 02/06/16 15:46, Konrad Rzeszutek Wilk wrote:
>> On Tue, May 31, 2016 at 11:24:10AM +0100, Julien Grall wrote:
>>> On 31/05/16 10:21, Stefano Stabellini wrote:
>>>> On Mon, 30 May 2016, Julien Grall wrote:
>>> By spinning in __apply_alternatives_multi_stop we protect us against
>>> modification in the common code and tricky bug (yes it might be
>>> difficult to
>>> hit and debug).
>>
>> I feel that you are moving down the stack to try to make the
>> impact of doing modifications have no impact (or as low as possible).
>>
>> And it would work now, but I am concerned that in the future it may be
>> not soo future proof.
>
> Can you detail here?
>
>> Would it perhaps make sense to make some of the livepatching mechanism
>> be exposed as general code? And use it for alternative asm as well?
>
> The code to sync the CPU looks very similar to stop_machine, so why
> would we want to get yet another mechanism to sync the CPUs and execute
> a specific function?

I forgot to mention that apply_alternatives_all is only called once 
during boot, before CPU0 goes into the idle_loop. If we have to patch 
afterwards, we would use apply_alternatives, which assumes that all the 
CPUs have been parked somewhere else.
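
A hypothetical later caller would then look like this (the function and
its arguments are made up; only apply_alternatives() comes from this patch):

#include <asm/alternative.h>

/*
 * Hypothetical post-boot user, e.g. a live patching payload: all other
 * CPUs are assumed to already be parked somewhere safe, so the payload's
 * text can be patched directly.
 */
static int apply_payload_alternatives(void *payload_text, size_t len)
{
    return apply_alternatives(payload_text, len);
}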

Regards,
Julien Grall June 6, 2016, 2:18 p.m. UTC | #7
Hi Konrad,

On 06/06/16 15:17, Konrad Rzeszutek Wilk wrote:
> On Thu, Jun 02, 2016 at 04:14:04PM +0100, Julien Grall wrote:
>>
>>
>> On 02/06/16 16:04, Julien Grall wrote:
>>> Hi Konrad,
>>>
>>> On 02/06/16 15:46, Konrad Rzeszutek Wilk wrote:
>>>> On Tue, May 31, 2016 at 11:24:10AM +0100, Julien Grall wrote:
>>>>> On 31/05/16 10:21, Stefano Stabellini wrote:
>>>>>> On Mon, 30 May 2016, Julien Grall wrote:
>>>>> By spinning in __apply_alternatives_multi_stop we protect us against
>>>>> modification in the common code and tricky bug (yes it might be
>>>>> difficult to
>>>>> hit and debug).
>>>>
>>>> I feel that you are moving down the stack to try to make the
>>>> impact of doing modifications have no impact (or as low as possible).
>>>>
>>>> And it would work now, but I am concerned that in the future it may be
>>>> not soo future proof.
>>>
>>> Can you detail here?
>>>
>>>> Would it perhaps make sense to make some of the livepatching mechanism
>>>> be exposed as general code? And use it for alternative asm as well?
>>>
>>> The code to sync the CPU looks very similar to stop_machine, so why
>>> would we want to get yet another mechanism to sync the CPUs and execute
>>> a specific function?
>>
>> I forgot to mention that apply_alternatives_all is only called one time
>> during the boot and before CPU0 goes in idle_loop. If we have to patch
>> afterward, we would use apply_alternatives which consider that all the CPUs
>> have been parked somewhere else.
>
> Oh, in that case please just mention that in the commit description.

Will do in the next version.

Cheers,

Patch

diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
index 6231cd5..9b3e66b 100644
--- a/xen/arch/arm/Kconfig
+++ b/xen/arch/arm/Kconfig
@@ -14,6 +14,7 @@  config ARM_64
 	def_bool y
 	depends on 64BIT
 	select HAS_GICV3
+    select ALTERNATIVE
 
 config ARM
 	def_bool y
@@ -46,6 +47,10 @@  config ACPI
 config HAS_GICV3
 	bool
 
+# Select ALTERNATIVE if the architecture supports runtime patching
+config ALTERNATIVE
+    bool
+
 endmenu
 
 source "common/Kconfig"
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index 9122f78..2d330aa 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -4,6 +4,7 @@  subdir-y += platforms
 subdir-$(CONFIG_ARM_64) += efi
 subdir-$(CONFIG_ACPI) += acpi
 
+obj-$(CONFIG_ALTERNATIVE) += alternative.o
 obj-y += bootfdt.o
 obj-y += cpu.o
 obj-y += cpufeature.o
diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c
new file mode 100644
index 0000000..0a5d1c8
--- /dev/null
+++ b/xen/arch/arm/alternative.c
@@ -0,0 +1,217 @@ 
+/*
+ * alternative runtime patching
+ * inspired by the x86 version
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/types.h>
+#include <xen/kernel.h>
+#include <xen/mm.h>
+#include <xen/vmap.h>
+#include <xen/smp.h>
+#include <xen/stop_machine.h>
+#include <asm/alternative.h>
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+#include <asm/cpufeature.h>
+#include <asm/insn.h>
+#include <asm/page.h>
+
+#define __ALT_PTR(a,f)      (u32 *)((void *)&(a)->f + (a)->f)
+#define ALT_ORIG_PTR(a)     __ALT_PTR(a, orig_offset)
+#define ALT_REPL_PTR(a)     __ALT_PTR(a, alt_offset)
+
+extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
+struct alt_region {
+    const struct alt_instr *begin;
+    const struct alt_instr *end;
+};
+
+/*
+ * Check if the target PC is within an alternative block.
+ */
+static bool_t branch_insn_requires_update(const struct alt_instr *alt,
+                                          unsigned long pc)
+{
+    unsigned long replptr;
+
+    if ( is_active_kernel_text(pc) )
+        return 1;
+
+    replptr = (unsigned long)ALT_REPL_PTR(alt);
+    if ( pc >= replptr && pc <= (replptr + alt->alt_len) )
+        return 0;
+
+    /*
+     * Branching into *another* alternate sequence is doomed, and
+     * we're not even trying to fix it up.
+     */
+    BUG();
+}
+
+static u32 get_alt_insn(const struct alt_instr *alt,
+                        const u32 *insnptr, const u32 *altinsnptr)
+{
+    u32 insn;
+
+    insn = le32_to_cpu(*altinsnptr);
+
+    if ( insn_is_branch_imm(insn) )
+    {
+        s32 offset = insn_get_branch_offset(insn);
+        unsigned long target;
+
+        target = (unsigned long)altinsnptr + offset;
+
+        /*
+         * If we're branching inside the alternate sequence,
+         * do not rewrite the instruction, as it is already
+         * correct. Otherwise, generate the new instruction.
+         */
+        if ( branch_insn_requires_update(alt, target) )
+        {
+            offset = target - (unsigned long)insnptr;
+            insn = insn_set_branch_offset(insn, offset);
+        }
+    }
+
+    return insn;
+}
+
+static int __apply_alternatives(const struct alt_region *region)
+{
+    const struct alt_instr *alt;
+    const u32 *origptr, *replptr;
+    u32 *writeptr, *writemap;
+    mfn_t text_mfn = _mfn(virt_to_mfn(_stext));
+    unsigned int text_order = get_order_from_bytes(_end - _start);
+
+    printk("Patching kernel code\n");
+
+    /*
+     * The text section is read-only. So re-map Xen to be able to patch
+     * the code.
+     */
+    writemap = __vmap(&text_mfn, 1 << text_order, 1, 1, PAGE_HYPERVISOR,
+                      VMAP_DEFAULT);
+    if ( !writemap )
+    {
+        printk("alternatives: Unable to map the text section\n");
+        return -ENOMEM;
+    }
+
+    for ( alt = region->begin; alt < region->end; alt++ )
+    {
+        u32 insn;
+        int i, nr_inst;
+
+        if ( !cpus_have_cap(alt->cpufeature) )
+            continue;
+
+        BUG_ON(alt->alt_len != alt->orig_len);
+
+        origptr = ALT_ORIG_PTR(alt);
+        writeptr = origptr - (u32 *)_start + writemap;
+        replptr = ALT_REPL_PTR(alt);
+
+        nr_inst = alt->alt_len / sizeof(insn);
+
+        for ( i = 0; i < nr_inst; i++ )
+        {
+            insn = get_alt_insn(alt, origptr + i, replptr + i);
+            *(writeptr + i) = cpu_to_le32(insn);
+        }
+
+        /* Ensure the new instructions reached the memory and nuke */
+        clean_and_invalidate_dcache_va_range(writeptr,
+                                             (sizeof (*writeptr) * nr_inst));
+    }
+
+    /* Nuke the instruction cache */
+    invalidate_icache();
+
+    vunmap(writemap);
+
+    return 0;
+}
+
+/*
+ * We might be patching the stop_machine state machine, so implement a
+ * really simple polling protocol here.
+ */
+static int __apply_alternatives_multi_stop(void *unused)
+{
+    static int patched = 0;
+    struct alt_region region = {
+        .begin = __alt_instructions,
+        .end = __alt_instructions_end,
+    };
+
+    /* We always have a CPU 0 at this point (__init) */
+    if ( smp_processor_id() )
+    {
+        while ( !read_atomic(&patched) )
+            cpu_relax();
+        isb();
+    }
+    else
+    {
+        int ret;
+
+        BUG_ON(patched);
+        ret = __apply_alternatives(&region);
+        /* The patching is not expected to fail during boot. */
+        BUG_ON(ret != 0);
+
+        /* Barriers provided by the cache flushing */
+        write_atomic(&patched, 1);
+    }
+
+    return 0;
+}
+
+void __init apply_alternatives_all(void)
+{
+    int ret;
+
+	/* better not try code patching on a live SMP system */
+    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);
+
+    /* stop_machine_run should never fail at this stage of the boot */
+    BUG_ON(ret);
+}
+
+int apply_alternatives(void *start, size_t length)
+{
+    struct alt_region region = {
+        .begin = start,
+        .end = start + length,
+    };
+
+    return __apply_alternatives(&region);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 09ff1ea..c4389ef 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -38,6 +38,7 @@ 
 #include <xen/vmap.h>
 #include <xen/libfdt/libfdt.h>
 #include <xen/acpi.h>
+#include <asm/alternative.h>
 #include <asm/page.h>
 #include <asm/current.h>
 #include <asm/setup.h>
@@ -834,6 +835,13 @@  void __init start_xen(unsigned long boot_phys_offset,
 
     do_initcalls();
 
+    /*
+     * It needs to be called after do_initcalls to be able to use
+     * stop_machine (tasklets initialized via an initcall).
+     * XXX: Is it too late?
+     */
+    apply_alternatives_all();
+
     /* Create initial domain 0. */
     /* The vGIC for DOM0 is exactly emulating the hardware GIC */
     config.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE;
diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index 1f010bd..80d9703 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -167,6 +167,13 @@  SECTIONS
        *(.xsm_initcall.init)
        __xsm_initcall_end = .;
   } :text
+#ifdef CONFIG_ALTERNATIVE
+  .init.alternatives : {
+      __alt_instructions = .;
+      *(.altinstructions)
+      __alt_instructions_end = .;
+  }
+#endif
   __init_end_efi = .;
   . = ALIGN(STACK_SIZE);
   __init_end = .;
diff --git a/xen/include/asm-arm/alternative.h b/xen/include/asm-arm/alternative.h
new file mode 100644
index 0000000..7e3a610
--- /dev/null
+++ b/xen/include/asm-arm/alternative.h
@@ -0,0 +1,165 @@ 
+#ifndef __ASM_ALTERNATIVE_H
+#define __ASM_ALTERNATIVE_H
+
+#include <asm/cpufeature.h>
+#include <xen/config.h>
+#include <xen/kconfig.h>
+
+#ifdef CONFIG_ALTERNATIVE
+
+#ifndef __ASSEMBLY__
+
+#include <xen/init.h>
+#include <xen/types.h>
+#include <xen/stringify.h>
+
+struct alt_instr {
+	s32 orig_offset;	/* offset to original instruction */
+	s32 alt_offset;		/* offset to replacement instruction */
+	u16 cpufeature;		/* cpufeature bit set for replacement */
+	u8  orig_len;		/* size of original instruction(s) */
+	u8  alt_len;		/* size of new instruction(s), <= orig_len */
+};
+
+void __init apply_alternatives_all(void);
+int apply_alternatives(void *start, size_t length);
+
+#define ALTINSTR_ENTRY(feature)						      \
+	" .word 661b - .\n"				/* label           */ \
+	" .word 663f - .\n"				/* new instruction */ \
+	" .hword " __stringify(feature) "\n"		/* feature bit     */ \
+	" .byte 662b-661b\n"				/* source len      */ \
+	" .byte 664f-663f\n"				/* replacement len */
+
+/*
+ * alternative assembly primitive:
+ *
+ * If any of these .org directive fail, it means that insn1 and insn2
+ * don't have the same length. This used to be written as
+ *
+ * .if ((664b-663b) != (662b-661b))
+ * 	.error "Alternatives instruction length mismatch"
+ * .endif
+ *
+ * but most assemblers die if insn1 or insn2 have a .inst. This should
+ * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
+ * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ */
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
+	".if "__stringify(cfg_enabled)" == 1\n"				\
+	"661:\n\t"							\
+	oldinstr "\n"							\
+	"662:\n"							\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(feature)						\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"a\"\n"			\
+	"663:\n\t"							\
+	newinstr "\n"							\
+	"664:\n\t"							\
+	".popsection\n\t"						\
+	".org	. - (664b-663b) + (662b-661b)\n\t"			\
+	".org	. - (662b-661b) + (664b-663b)\n"			\
+	".endif\n"
+
+#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
+	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
+
+#else
+
+#include <asm/asm_defns.h>
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+	.word \orig_offset - .
+	.word \alt_offset - .
+	.hword \feature
+	.byte \orig_len
+	.byte \alt_len
+.endm
+
+.macro alternative_insn insn1, insn2, cap, enable = 1
+	.if \enable
+661:	\insn1
+662:	.pushsection .altinstructions, "a"
+	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+	.popsection
+	.pushsection .altinstr_replacement, "ax"
+663:	\insn2
+664:	.popsection
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
+	.endif
+.endm
+
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this macro will be assembled and linked as
+ * normal. There are no restrictions on this code.
+ */
+.macro alternative_if_not cap, enable = 1
+	.if \enable
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+	.popsection
+661:
+	.endif
+.endm
+
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not contain a branch target that is used outside of the
+ *    alternative sequence it is defined in (branches into an
+ *    alternative sequence are not fixed up).
+ */
+.macro alternative_else
+662:	.pushsection .altinstr_replacement, "ax"
+663:
+.endm
+
+/*
+ * Complete an alternative code sequence.
+ */
+.macro alternative_endif
+664:	.popsection
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
+.endm
+
+#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)	\
+	alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
+
+#endif  /*  __ASSEMBLY__  */
+
+/*
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
+ *
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
+ * N.B. If CONFIG_FOO is specified, but not selected, the whole block
+ *      will be omitted, including oldinstr.
+ */
+#define ALTERNATIVE(oldinstr, newinstr, ...)   \
+	_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
+
+#else /* !CONFIG_ALTERNATIVE */
+
+static inline void apply_alternatives_all(void)
+{
+}
+
+static inline int apply_alternatives(void *start, size_t length)
+{
+    return 0;
+}
+
+#endif
+
+#endif /* __ASM_ALTERNATIVE_H */
diff --git a/xen/include/asm-arm/arm64/insn.h b/xen/include/asm-arm/arm64/insn.h
index cfcdbe9..6ce37be 100644
--- a/xen/include/asm-arm/arm64/insn.h
+++ b/xen/include/asm-arm/arm64/insn.h
@@ -61,6 +61,22 @@  u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
+/* Wrapper for common code */
+static inline bool insn_is_branch_imm(u32 insn)
+{
+    return aarch64_insn_is_branch_imm(insn);
+}
+
+static inline s32 insn_get_branch_offset(u32 insn)
+{
+    return aarch64_get_branch_offset(insn);
+}
+
+static inline u32 insn_set_branch_offset(u32 insn, s32 offset)
+{
+    return aarch64_set_branch_offset(insn, offset);
+}
+
 #endif /* !__ARCH_ARM_ARM64_INSN */
 /*
  * Local variables: