[AArch64,PATCHv2,1/3] Add missing Poly64_t intrinsics to GCC

Message ID: VI1PR0801MB2031A3C73308CC39BA810B56FFA70@VI1PR0801MB2031.eurprd08.prod.outlook.com
State: New

Commit Message

Tamar Christina Nov. 7, 2016, 1:55 p.m. UTC
Hi all,

This patch (1 of 3) adds the following NEON intrinsics
to the AArch64 back-end of GCC:

* vsli_n_p64
* vsliq_n_p64

* vld1_p64
* vld1q_p64
* vld1_dup_p64
* vld1q_dup_p64

* vst1_p64
* vst1q_p64
  
* vld2_p64
* vld3_p64
* vld4_p64
* vld2q_p64
* vld3q_p64
* vld4q_p64

* vld2_dup_p64
* vld3_dup_p64
* vld4_dup_p64

* __aarch64_vdup_lane_p64
* __aarch64_vdup_laneq_p64
* __aarch64_vdupq_lane_p64
* __aarch64_vdupq_laneq_p64

* vget_lane_p64
* vgetq_lane_p64

* vreinterpret_p8_p64
* vreinterpretq_p8_p64
* vreinterpret_p16_p64
* vreinterpretq_p16_p64

* vreinterpret_p64_f16
* vreinterpret_p64_f64
* vreinterpret_p64_s8
* vreinterpret_p64_s16
* vreinterpret_p64_s32
* vreinterpret_p64_s64
* vreinterpret_p64_f32
* vreinterpret_p64_u8
* vreinterpret_p64_u16
* vreinterpret_p64_u32
* vreinterpret_p64_u64
* vreinterpret_p64_p8

* vreinterpretq_p64_f64
* vreinterpretq_p64_s8
* vreinterpretq_p64_s16
* vreinterpretq_p64_s32
* vreinterpretq_p64_s64
* vreinterpretq_p64_f16
* vreinterpretq_p64_f32
* vreinterpretq_p64_u8
* vreinterpretq_p64_u16
* vreinterpretq_p64_u32
* vreinterpretq_p64_u64
* vreinterpretq_p64_p8

* vreinterpret_f16_p64
* vreinterpretq_f16_p64
* vreinterpret_f32_p64
* vreinterpretq_f32_p64
* vreinterpret_f64_p64
* vreinterpretq_f64_p64
* vreinterpret_s64_p64
* vreinterpretq_s64_p64
* vreinterpret_u64_p64
* vreinterpretq_u64_p64
* vreinterpret_s8_p64
* vreinterpretq_s8_p64
* vreinterpret_s16_p64
* vreinterpret_s32_p64
* vreinterpretq_s32_p64
* vreinterpret_u8_p64
* vreinterpret_u16_p64
* vreinterpretq_u16_p64
* vreinterpret_u32_p64
* vreinterpretq_u32_p64

* vset_lane_p64
* vsetq_lane_p64

* vget_low_p64
* vget_high_p64

* vcombine_p64
* vcreate_p64

* vst2_lane_p64
* vst3_lane_p64
* vst4_lane_p64
* vst2q_lane_p64
* vst3q_lane_p64
* vst4q_lane_p64

* vget_lane_p64
* vget_laneq_p64
* vset_lane_p64
* vset_laneq_p64

* vcopy_lane_p64
* vcopy_laneq_p64  

* vdup_n_p64
* vdupq_n_p64
* vdup_lane_p64
* vdup_laneq_p64

* vld1_p64
* vld1q_p64
* vld1_dup_p64
* vld1q_dup_p64
* vld1q_dup_p64
* vmov_n_p64
* vmovq_n_p64
* vst3q_p64
* vst4q_p64

* vld1_lane_p64
* vld1q_lane_p64
* vst1_lane_p64
* vst1q_lane_p64
* vcopy_laneq_p64
* vcopyq_laneq_p64
* vdupq_laneq_p64
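
To give a feel for how a few of these fit together, here is a rough usage
sketch (the function name, arguments, and constant below are arbitrary, and
it assumes a compiler with this patch applied):

#include <arm_neon.h>

poly64x2_t
poly64_sketch (const poly64_t *p, uint64x2_t mask)
{
  poly64x1_t lo = vld1_p64 (p);                 /* load a single poly64 lane  */
  poly64x1_t hi = vcreate_p64 (0x1234567890abcdefULL);
  poly64x2_t v = vcombine_p64 (lo, hi);         /* widen to a 128-bit vector  */
  poly64x2_t m = vreinterpretq_p64_u64 (mask);  /* same bits, poly64 view  */
  return vsliq_n_p64 (v, m, 8);                 /* shift m left by 8, insert into v  */
}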

Added new tests for these and ran regression tests on aarch64-none-linux-gnu
and on arm-none-linux-gnueabihf.

Ok for trunk?

Thanks,
Tamar

gcc/
2016-11-04  Tamar Christina  <tamar.christina@arm.com>

	* config/aarch64/aarch64-builtins.c (TYPES_SETREGP): Added poly type.
	(TYPES_GETREGP): Likewise.
	(TYPES_SHIFTINSERTP): Likewise.
	(TYPES_COMBINEP): Likewise.
	(TYPES_STORE1P): Likewise.
	* config/aarch64/aarch64-simd-builtins.def
	(combine): Added poly generator.
	(get_dregoi): Likewise.
	(get_dregci): Likewise.
	(get_dregxi): Likewise.
	(ssli_n): Likewise.
	(ld1): Likewise.
	(st1): Likewise.
	* config/aarch64/arm_neon.h
	(poly64x1x2_t, poly64x1x3_t): New.
	(poly64x1x4_t, poly64x2x2_t): Likewise.
	(poly64x2x3_t, poly64x2x4_t): Likewise.
	(poly64x1_t): Likewise.
	(vcreate_p64, vcombine_p64): Likewise.
	(vdup_n_p64, vdupq_n_p64): Likewise.
	(vld2_p64, vld2q_p64): Likewise.
	(vld3_p64, vld3q_p64): Likewise.
	(vld4_p64, vld4q_p64): Likewise.
	(vld2_dup_p64, vld3_dup_p64): Likewise.
	(vld4_dup_p64, vsli_n_p64): Likewise.
	(vsliq_n_p64, vst1_p64): Likewise.
	(vst1q_p64, vst2_p64): Likewise.
	(vst3_p64, vst4_p64): Likewise.
	(__aarch64_vdup_lane_p64, __aarch64_vdup_laneq_p64): Likewise.
	(__aarch64_vdupq_lane_p64, __aarch64_vdupq_laneq_p64): Likewise.
	(vget_lane_p64, vgetq_lane_p64): Likewise.
	(vreinterpret_p8_p64, vreinterpretq_p8_p64): Likewise.
	(vreinterpret_p16_p64, vreinterpretq_p16_p64): Likewise.
	(vreinterpret_p64_f16, vreinterpret_p64_f64): Likewise.
	(vreinterpret_p64_s8, vreinterpret_p64_s16): Likewise.
	(vreinterpret_p64_s32, vreinterpret_p64_s64): Likewise.
	(vreinterpret_p64_f32, vreinterpret_p64_u8): Likewise.
	(vreinterpret_p64_u16, vreinterpret_p64_u32): Likewise.
	(vreinterpret_p64_u64, vreinterpret_p64_p8): Likewise.
	(vreinterpretq_p64_f64, vreinterpretq_p64_s8): Likewise.
	(vreinterpretq_p64_s16, vreinterpretq_p64_s32): Likewise.
	(vreinterpretq_p64_s64, vreinterpretq_p64_f16): Likewise.
	(vreinterpretq_p64_f32, vreinterpretq_p64_u8): Likewise.
	(vreinterpretq_p64_u16, vreinterpretq_p64_u32): Likewise.
	(vreinterpretq_p64_u64, vreinterpretq_p64_p8): Likewise.
	(vreinterpret_f16_p64, vreinterpretq_f16_p64): Likewise.
	(vreinterpret_f32_p64, vreinterpretq_f32_p64): Likewise.
	(vreinterpret_f64_p64, vreinterpretq_f64_p64): Likewise.
	(vreinterpret_s64_p64, vreinterpretq_s64_p64): Likewise.
	(vreinterpret_u64_p64, vreinterpretq_u64_p64): Likewise.
	(vreinterpret_s8_p64, vreinterpretq_s8_p64): Likewise.
	(vreinterpret_s16_p64, vreinterpret_s32_p64): Likewise.
	(vreinterpretq_s32_p64, vreinterpret_u8_p64): Likewise.
	(vreinterpret_u16_p64, vreinterpretq_u16_p64): Likewise.
	(vreinterpret_u32_p64, vreinterpretq_u32_p64): Likewise.
	(vset_lane_p64, vsetq_lane_p64): Likewise.
	(vget_low_p64, vget_high_p64): Likewise.
	(vcombine_p64, vst2_lane_p64): Likewise.
	(vst3_lane_p64, vst4_lane_p64): Likewise.
	(vst2q_lane_p64, vst3q_lane_p64): Likewise.
	(vst4q_lane_p64, vget_lane_p64): Likewise.
	(vget_laneq_p64, vset_lane_p64): Likewise.
	(vset_laneq_p64, vcopy_lane_p64): Likewise.
	(vcopy_laneq_p64, vdup_n_p64): Likewise.
	(vdupq_n_p64, vdup_lane_p64): Likewise.
	(vdup_laneq_p64, vld1_p64): Likewise.
	(vld1q_p64, vld1_dup_p64): Likewise.
	(vld1q_dup_p64, vld1q_dup_p64): Likewise.
	(vmov_n_p64, vmovq_n_p64): Likewise.
	(vst3q_p64, vst4q_p64): Likewise.
	(vld1_lane_p64, vld1q_lane_p64): Likewise.
	(vst1_lane_p64, vst1q_lane_p64): Likewise.
	(vcopy_laneq_p64, vcopyq_laneq_p64): Likewise.
	(vdupq_laneq_p64): Likewise.

Comments

James Greenhalgh Nov. 8, 2016, 10:15 a.m. UTC | #1
On Mon, Nov 07, 2016 at 01:55:15PM +0000, Tamar Christina wrote:
> Hi all,
>
> Added new tests for these and ran regression tests on aarch64-none-linux-gnu
> and on arm-none-linux-gnueabihf.
>
> Ok for trunk?

OK.

Thanks,
James

> [quoted ChangeLog snipped]
Christophe Lyon Nov. 8, 2016, 11:20 a.m. UTC | #2
On 07/11/2016 14:55, Tamar Christina wrote:
> Hi all,
>
> This patch (1 of 3) adds the following NEON intrinsics
> to the AArch64 back-end of GCC:
>
> * vsli_n_p64
> * vsliq_n_p64

I notice that vsrl_n_p64 and vsriq_n_p64 exist on aarch32. Is this an omission in this patch for aarch64?

> [rest of quoted message snipped]
James Greenhalgh Nov. 8, 2016, 11:29 a.m. UTC | #3
On Tue, Nov 08, 2016 at 12:20:57PM +0100, Christophe Lyon wrote:
> On 07/11/2016 14:55, Tamar Christina wrote:
> >Hi all,
> >
> >This patch (1 of 3) adds the following NEON intrinsics
> >to the AArch64 back-end of GCC:
> >
> >* vsli_n_p64
> >* vsliq_n_p64
> I notice that vsrl_n_p64 and vsriq_n_p64 exist on aarch32. Is this an
> omission in this patch for aarch64?

Presumably you meant vsri_n_p64 here?

That looks like an omission in the patch, but I'm still happy for it to go
in as is with a follow-up patch adding the final two intrinsics.

Thanks,
James
Christophe Lyon Nov. 8, 2016, 11:31 a.m. UTC | #4
On 8 November 2016 at 12:29, James Greenhalgh <james.greenhalgh@arm.com> wrote:
> On Tue, Nov 08, 2016 at 12:20:57PM +0100, Christophe Lyon wrote:
>> On 07/11/2016 14:55, Tamar Christina wrote:
>> >Hi all,
>> >
>> >This patch (1 of 3) adds the following NEON intrinsics
>> >to the AArch64 back-end of GCC:
>> >
>> >* vsli_n_p64
>> >* vsliq_n_p64
>> I notice that vsrl_n_p64 and vsriq_n_p64 exist on aarch32. Is this an
>> omission in this patch for aarch64?
>
> Presumably you meant vsri_n_p64 here?

Yes, sorry for the typo.

> That looks like an omission in the patch, but I'm still happy for it to go
> in as is with a follow-up patch adding the final two intrinsics.

Indeed

> Thanks,
> James
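
For reference, a rough sketch of the distinction discussed above (the
function name and shift count are arbitrary, and it assumes this patch is
applied): vsli_n_p64 shifts the second operand left by the immediate and
inserts the result into the first operand, keeping its low bits, while
vsri_n_p64/vsriq_n_p64 would be the shift-right-and-insert counterparts
left for the follow-up patch.

#include <arm_neon.h>

/* vsli_n_p64 is added by this patch; the vsri_n_p64 counterpart is not,
   so only the shift-left-insert form is shown here.  */
poly64x1_t
sli_sketch (poly64x1_t a, poly64x1_t b)
{
  /* Shift b left by 16 bits and insert into a; the low 16 bits of a
     are retained.  */
  return vsli_n_p64 (a, b, 16);
}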

Patch

diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 594a6151e69e35e8e5a5a9b0542c81c1610f962e..05cc52eba33053f60fb3a590f7f88d178f9fd905 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -170,6 +170,10 @@  aarch64_types_quadop_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
 #define TYPES_QUADOP_LANE (aarch64_types_quadop_lane_qualifiers)
 
 static enum aarch64_type_qualifiers
+aarch64_types_binop_imm_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_poly, qualifier_none, qualifier_immediate };
+#define TYPES_GETREGP (aarch64_types_binop_imm_p_qualifiers)
+static enum aarch64_type_qualifiers
 aarch64_types_binop_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_none, qualifier_none, qualifier_immediate };
 #define TYPES_GETREG (aarch64_types_binop_imm_qualifiers)
@@ -188,11 +192,20 @@  aarch64_types_unsigned_shift_qualifiers[SIMD_MAX_BUILTIN_ARGS]
 #define TYPES_USHIFTIMM (aarch64_types_unsigned_shift_qualifiers)
 
 static enum aarch64_type_qualifiers
-aarch64_types_ternop_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
-  = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate };
-#define TYPES_SETREG (aarch64_types_ternop_imm_qualifiers)
-#define TYPES_SHIFTINSERT (aarch64_types_ternop_imm_qualifiers)
-#define TYPES_SHIFTACC (aarch64_types_ternop_imm_qualifiers)
+aarch64_types_ternop_s_imm_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_none, qualifier_none, qualifier_poly, qualifier_immediate};
+#define TYPES_SETREGP (aarch64_types_ternop_s_imm_p_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_ternop_s_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate};
+#define TYPES_SETREG (aarch64_types_ternop_s_imm_qualifiers)
+#define TYPES_SHIFTINSERT (aarch64_types_ternop_s_imm_qualifiers)
+#define TYPES_SHIFTACC (aarch64_types_ternop_s_imm_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_ternop_p_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_poly, qualifier_poly, qualifier_poly, qualifier_immediate};
+#define TYPES_SHIFTINSERTP (aarch64_types_ternop_p_imm_qualifiers)
 
 static enum aarch64_type_qualifiers
 aarch64_types_unsigned_shiftacc_qualifiers[SIMD_MAX_BUILTIN_ARGS]
@@ -207,6 +220,11 @@  aarch64_types_combine_qualifiers[SIMD_MAX_BUILTIN_ARGS]
 #define TYPES_COMBINE (aarch64_types_combine_qualifiers)
 
 static enum aarch64_type_qualifiers
+aarch64_types_combine_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_poly, qualifier_poly, qualifier_poly };
+#define TYPES_COMBINEP (aarch64_types_combine_p_qualifiers)
+
+static enum aarch64_type_qualifiers
 aarch64_types_load1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_none, qualifier_const_pointer_map_mode };
 #define TYPES_LOAD1 (aarch64_types_load1_qualifiers)
@@ -239,6 +257,10 @@  aarch64_types_bsl_u_qualifiers[SIMD_MAX_BUILTIN_ARGS]
    qualifier_map_mode | qualifier_pointer to build a pointer to the
    element type of the vector.  */
 static enum aarch64_type_qualifiers
+aarch64_types_store1_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_void, qualifier_pointer_map_mode, qualifier_poly };
+#define TYPES_STORE1P (aarch64_types_store1_p_qualifiers)
+static enum aarch64_type_qualifiers
 aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_void, qualifier_pointer_map_mode, qualifier_none };
 #define TYPES_STORE1 (aarch64_types_store1_qualifiers)
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index e1154b4b27820c0075d9a9edb4f8b48ef4f06b07..bc8a85dcf03cc5e52891ae4300ec721e7a533b9b 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -40,6 +40,7 @@ 
    10 - CODE_FOR_<name><mode>.  */
 
   BUILTIN_VDC (COMBINE, combine, 0)
+  VAR1 (COMBINEP, combine, 0, di)
   BUILTIN_VB (BINOP, pmul, 0)
   BUILTIN_VHSDF_HSDF (BINOP, fmulx, 0)
   BUILTIN_VHSDF_DF (UNOP, sqrt, 2)
@@ -68,14 +69,23 @@ 
   BUILTIN_VDC (GETREG, get_dregoi, 0)
   BUILTIN_VDC (GETREG, get_dregci, 0)
   BUILTIN_VDC (GETREG, get_dregxi, 0)
+  VAR1 (GETREGP, get_dregoi, 0, di)
+  VAR1 (GETREGP, get_dregci, 0, di)
+  VAR1 (GETREGP, get_dregxi, 0, di)
   /* Implemented by aarch64_get_qreg<VSTRUCT:mode><VQ:mode>.  */
   BUILTIN_VQ (GETREG, get_qregoi, 0)
   BUILTIN_VQ (GETREG, get_qregci, 0)
   BUILTIN_VQ (GETREG, get_qregxi, 0)
+  VAR1 (GETREGP, get_qregoi, 0, v2di)
+  VAR1 (GETREGP, get_qregci, 0, v2di)
+  VAR1 (GETREGP, get_qregxi, 0, v2di)
   /* Implemented by aarch64_set_qreg<VSTRUCT:mode><VQ:mode>.  */
   BUILTIN_VQ (SETREG, set_qregoi, 0)
   BUILTIN_VQ (SETREG, set_qregci, 0)
   BUILTIN_VQ (SETREG, set_qregxi, 0)
+  VAR1 (SETREGP, set_qregoi, 0, v2di)
+  VAR1 (SETREGP, set_qregci, 0, v2di)
+  VAR1 (SETREGP, set_qregxi, 0, v2di)
   /* Implemented by aarch64_ld<VSTRUCT:nregs><VDC:mode>.  */
   BUILTIN_VDC (LOADSTRUCT, ld2, 0)
   BUILTIN_VDC (LOADSTRUCT, ld3, 0)
@@ -224,6 +234,7 @@ 
   BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssri_n, 0)
   BUILTIN_VSDQ_I_DI (USHIFTACC, usri_n, 0)
   BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssli_n, 0)
+  VAR2 (SHIFTINSERTP, ssli_n, 0, di, v2di)
   BUILTIN_VSDQ_I_DI (USHIFTACC, usli_n, 0)
   /* Implemented by aarch64_<sur>qshl<u>_n<mode>.  */
   BUILTIN_VSDQ_I (SHIFTIMM_USS, sqshlu_n, 0)
@@ -416,9 +427,11 @@ 
 
   /* Implemented by aarch64_ld1<VALL_F16:mode>.  */
   BUILTIN_VALL_F16 (LOAD1, ld1, 0)
+  VAR1(STORE1P, ld1, 0, v2di)
 
   /* Implemented by aarch64_st1<VALL_F16:mode>.  */
   BUILTIN_VALL_F16 (STORE1, st1, 0)
+  VAR1(STORE1P, st1, 0, v2di)
 
   /* Implemented by fma<mode>4.  */
   BUILTIN_VHSDF (TERNOP, fma, 4)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index c463e3b698a47b9b5c5a04e0fb7fff1f71817af1..d39adf1f19a5d82f39b638e14e69906fc3d3f713 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -58,6 +58,7 @@  typedef __Float64x2_t float64x2_t;
 typedef __Poly8x16_t poly8x16_t;
 typedef __Poly16x8_t poly16x8_t;
 typedef __Poly64x2_t poly64x2_t;
+typedef __Poly64x1_t poly64x1_t;
 typedef __Uint8x16_t uint8x16_t;
 typedef __Uint16x8_t uint16x8_t;
 typedef __Uint32x4_t uint32x4_t;
@@ -202,6 +203,36 @@  typedef struct poly16x8x2_t
   poly16x8_t val[2];
 } poly16x8x2_t;
 
+typedef struct poly64x1x2_t
+{
+  poly64x1_t val[2];
+} poly64x1x2_t;
+
+typedef struct poly64x1x3_t
+{
+  poly64x1_t val[3];
+} poly64x1x3_t;
+
+typedef struct poly64x1x4_t
+{
+  poly64x1_t val[4];
+} poly64x1x4_t;
+
+typedef struct poly64x2x2_t
+{
+  poly64x2_t val[2];
+} poly64x2x2_t;
+
+typedef struct poly64x2x3_t
+{
+  poly64x2_t val[3];
+} poly64x2x3_t;
+
+typedef struct poly64x2x4_t
+{
+  poly64x2_t val[4];
+} poly64x2x4_t;
+
 typedef struct int8x8x3_t
 {
   int8x8_t val[3];
@@ -476,6 +507,8 @@  typedef struct poly16x8x4_t
    __aarch64_vdup_lane_any (p8, , __a, __b)
 #define __aarch64_vdup_lane_p16(__a, __b) \
    __aarch64_vdup_lane_any (p16, , __a, __b)
+#define __aarch64_vdup_lane_p64(__a, __b) \
+   __aarch64_vdup_lane_any (p64, , __a, __b)
 #define __aarch64_vdup_lane_s8(__a, __b) \
    __aarch64_vdup_lane_any (s8, , __a, __b)
 #define __aarch64_vdup_lane_s16(__a, __b) \
@@ -504,6 +537,8 @@  typedef struct poly16x8x4_t
    __aarch64_vdup_lane_any (p8, , __a, __b)
 #define __aarch64_vdup_laneq_p16(__a, __b) \
    __aarch64_vdup_lane_any (p16, , __a, __b)
+#define __aarch64_vdup_laneq_p64(__a, __b) \
+   __aarch64_vdup_lane_any (p64, , __a, __b)
 #define __aarch64_vdup_laneq_s8(__a, __b) \
    __aarch64_vdup_lane_any (s8, , __a, __b)
 #define __aarch64_vdup_laneq_s16(__a, __b) \
@@ -532,6 +567,8 @@  typedef struct poly16x8x4_t
    __aarch64_vdup_lane_any (p8, q, __a, __b)
 #define __aarch64_vdupq_lane_p16(__a, __b) \
    __aarch64_vdup_lane_any (p16, q, __a, __b)
+#define __aarch64_vdupq_lane_p64(__a, __b) \
+   __aarch64_vdup_lane_any (p64, q, __a, __b)
 #define __aarch64_vdupq_lane_s8(__a, __b) \
    __aarch64_vdup_lane_any (s8, q, __a, __b)
 #define __aarch64_vdupq_lane_s16(__a, __b) \
@@ -560,6 +597,8 @@  typedef struct poly16x8x4_t
    __aarch64_vdup_lane_any (p8, q, __a, __b)
 #define __aarch64_vdupq_laneq_p16(__a, __b) \
    __aarch64_vdup_lane_any (p16, q, __a, __b)
+#define __aarch64_vdupq_laneq_p64(__a, __b) \
+   __aarch64_vdup_lane_any (p64, q, __a, __b)
 #define __aarch64_vdupq_laneq_s8(__a, __b) \
    __aarch64_vdup_lane_any (s8, q, __a, __b)
 #define __aarch64_vdupq_laneq_s16(__a, __b) \
@@ -3076,6 +3115,13 @@  vcreate_p16 (uint64_t __a)
   return (poly16x4_t) __a;
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_p64 (uint64_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
 /* vget_lane  */
 
 __extension__ extern __inline float16_t
@@ -3113,6 +3159,13 @@  vget_lane_p16 (poly16x4_t __a, const int __b)
   return __aarch64_vget_lane_any (__a, __b);
 }
 
+__extension__ extern __inline poly64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_p64 (poly64x1_t __a, const int __b)
+{
+  return __aarch64_vget_lane_any (__a, __b);
+}
+
 __extension__ extern __inline int8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vget_lane_s8 (int8x8_t __a, const int __b)
@@ -3206,6 +3259,13 @@  vgetq_lane_p16 (poly16x8_t __a, const int __b)
   return __aarch64_vget_lane_any (__a, __b);
 }
 
+__extension__ extern __inline poly64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_p64 (poly64x2_t __a, const int __b)
+{
+  return __aarch64_vget_lane_any (__a, __b);
+}
+
 __extension__ extern __inline int8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vgetq_lane_s8 (int8x16_t __a, const int __b)
@@ -3348,6 +3408,13 @@  vreinterpret_p8_p16 (poly16x4_t __a)
   return (poly8x8_t) __a;
 }
 
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_p64 (poly64x1_t __a)
+{
+  return (poly8x8_t) __a;
+}
+
 __extension__ extern __inline poly8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_p8_f64 (float64x2_t __a)
@@ -3432,6 +3499,13 @@  vreinterpretq_p8_p16 (poly16x8_t __a)
   return (poly8x16_t) __a;
 }
 
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_p64 (poly64x2_t __a)
+{
+  return (poly8x16_t) __a;
+}
+
 __extension__ extern __inline poly16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_p16_f16 (float16x4_t __a)
@@ -3516,6 +3590,13 @@  vreinterpret_p16_p8 (poly8x8_t __a)
   return (poly16x4_t) __a;
 }
 
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_p64 (poly64x1_t __a)
+{
+  return (poly16x4_t) __a;
+}
+
 __extension__ extern __inline poly16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_p16_f64 (float64x2_t __a)
@@ -3600,6 +3681,181 @@  vreinterpretq_p16_p8 (poly8x16_t __a)
   return (poly16x8_t) __a;
 }
 
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_p64 (poly64x2_t __a)
+{
+  return (poly16x8_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_f16 (float16x4_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_f64 (float64x1_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s8 (int8x8_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s16 (int16x4_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s32 (int32x2_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s64 (int64x1_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_f32 (float32x2_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u8 (uint8x8_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u16 (uint16x4_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u32 (uint32x2_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u64 (uint64x1_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_p8 (poly8x8_t __a)
+{
+  return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_f64 (float64x2_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s8 (int8x16_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s16 (int16x8_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s32 (int32x4_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s64 (int64x2_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_f16 (float16x8_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_f32 (float32x4_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u8 (uint8x16_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u16 (uint16x8_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u32 (uint32x4_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u64 (uint64x2_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_p8 (poly8x16_t __a)
+{
+  return (poly64x2_t) __a;
+}
+
 __extension__ extern __inline float16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_f16_f64 (float64x1_t __a)
@@ -3684,6 +3940,13 @@  vreinterpret_f16_p16 (poly16x4_t __a)
   return (float16x4_t) __a;
 }
 
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_p64 (poly64x1_t __a)
+{
+  return (float16x4_t) __a;
+}
+
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_f16_f64 (float64x2_t __a)
@@ -3768,6 +4031,13 @@  vreinterpretq_f16_p16 (poly16x8_t __a)
   return (float16x8_t) __a;
 }
 
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_p64 (poly64x2_t __a)
+{
+  return (float16x8_t) __a;
+}
+
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_f32_f16 (float16x4_t __a)
@@ -3852,6 +4122,13 @@  vreinterpret_f32_p16 (poly16x4_t __a)
   return (float32x2_t) __a;
 }
 
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_p64 (poly64x1_t __a)
+{
+  return (float32x2_t) __a;
+}
+
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_f32_f16 (float16x8_t __a)
@@ -3936,6 +4213,13 @@  vreinterpretq_f32_p16 (poly16x8_t __a)
   return (float32x4_t) __a;
 }
 
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_p64 (poly64x2_t __a)
+{
+  return (float32x4_t) __a;
+}
+
 __extension__ extern __inline float64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_f64_f16 (float16x4_t __a)
@@ -3966,6 +4250,13 @@  vreinterpret_f64_p16 (poly16x4_t __a)
 
 __extension__ extern __inline float64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f64_p64 (poly64x1_t __a)
+{
+  return (float64x1_t) __a;
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_f64_s8 (int8x8_t __a)
 {
   return (float64x1_t) __a;
@@ -4050,6 +4341,13 @@  vreinterpretq_f64_p16 (poly16x8_t __a)
 
 __extension__ extern __inline float64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f64_p64 (poly64x2_t __a)
+{
+  return (float64x2_t) __a;
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_f64_s8 (int8x16_t __a)
 {
   return (float64x2_t) __a;
@@ -4188,6 +4486,13 @@  vreinterpret_s64_p16 (poly16x4_t __a)
   return (int64x1_t) __a;
 }
 
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_p64 (poly64x1_t __a)
+{
+  return (int64x1_t) __a;
+}
+
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_s64_f64 (float64x2_t __a)
@@ -4272,6 +4577,13 @@  vreinterpretq_s64_p16 (poly16x8_t __a)
   return (int64x2_t) __a;
 }
 
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_p64 (poly64x2_t __a)
+{
+  return (int64x2_t) __a;
+}
+
 __extension__ extern __inline uint64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_u64_f16 (float16x4_t __a)
@@ -4356,6 +4668,13 @@  vreinterpret_u64_p16 (poly16x4_t __a)
   return (uint64x1_t) __a;
 }
 
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_p64 (poly64x1_t __a)
+{
+  return (uint64x1_t) __a;
+}
+
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_u64_f64 (float64x2_t __a)
@@ -4440,6 +4759,13 @@  vreinterpretq_u64_p16 (poly16x8_t __a)
   return (uint64x2_t) __a;
 }
 
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_p64 (poly64x2_t __a)
+{
+  return (uint64x2_t) __a;
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_s8_f16 (float16x4_t __a)
@@ -4524,6 +4850,13 @@  vreinterpret_s8_p16 (poly16x4_t __a)
   return (int8x8_t) __a;
 }
 
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_p64 (poly64x1_t __a)
+{
+  return (int8x8_t) __a;
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_s8_f64 (float64x2_t __a)
@@ -4608,6 +4941,13 @@  vreinterpretq_s8_p16 (poly16x8_t __a)
   return (int8x16_t) __a;
 }
 
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_p64 (poly64x2_t __a)
+{
+  return (int8x16_t) __a;
+}
+
 __extension__ extern __inline int16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_s16_f16 (float16x4_t __a)
@@ -4692,6 +5032,13 @@  vreinterpret_s16_p16 (poly16x4_t __a)
   return (int16x4_t) __a;
 }
 
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_p64 (poly64x1_t __a)
+{
+  return (int16x4_t) __a;
+}
+
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_s16_f64 (float64x2_t __a)
@@ -4776,6 +5123,13 @@  vreinterpretq_s16_p16 (poly16x8_t __a)
   return (int16x8_t) __a;
 }
 
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_p64 (poly64x2_t __a)
+{
+  return (int16x8_t) __a;
+}
+
 __extension__ extern __inline int32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_s32_f16 (float16x4_t __a)
@@ -4860,6 +5214,13 @@  vreinterpret_s32_p16 (poly16x4_t __a)
   return (int32x2_t) __a;
 }
 
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_p64 (poly64x1_t __a)
+{
+  return (int32x2_t) __a;
+}
+
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_s32_f64 (float64x2_t __a)
@@ -4944,6 +5305,13 @@  vreinterpretq_s32_p16 (poly16x8_t __a)
   return (int32x4_t) __a;
 }
 
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_p64 (poly64x2_t __a)
+{
+  return (int32x4_t) __a;
+}
+
 __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_u8_f16 (float16x4_t __a)
@@ -5028,6 +5396,13 @@  vreinterpret_u8_p16 (poly16x4_t __a)
   return (uint8x8_t) __a;
 }
 
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_p64 (poly64x1_t __a)
+{
+  return (uint8x8_t) __a;
+}
+
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_u8_f64 (float64x2_t __a)
@@ -5112,6 +5487,13 @@  vreinterpretq_u8_p16 (poly16x8_t __a)
   return (uint8x16_t) __a;
 }
 
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_p64 (poly64x2_t __a)
+{
+  return (uint8x16_t) __a;
+}
+
 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_u16_f16 (float16x4_t __a)
@@ -5196,6 +5578,13 @@  vreinterpret_u16_p16 (poly16x4_t __a)
   return (uint16x4_t) __a;
 }
 
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_p64 (poly64x1_t __a)
+{
+  return (uint16x4_t) __a;
+}
+
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_u16_f64 (float64x2_t __a)
@@ -5280,6 +5669,13 @@  vreinterpretq_u16_p16 (poly16x8_t __a)
   return (uint16x8_t) __a;
 }
 
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_p64 (poly64x2_t __a)
+{
+  return (uint16x8_t) __a;
+}
+
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpret_u32_f16 (float16x4_t __a)
@@ -5364,6 +5760,13 @@  vreinterpret_u32_p16 (poly16x4_t __a)
   return (uint32x2_t) __a;
 }
 
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_p64 (poly64x1_t __a)
+{
+  return (uint32x2_t) __a;
+}
+
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vreinterpretq_u32_f64 (float64x2_t __a)
@@ -5448,6 +5851,13 @@  vreinterpretq_u32_p16 (poly16x8_t __a)
   return (uint32x4_t) __a;
 }
 
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_p64 (poly64x2_t __a)
+{
+  return (uint32x4_t) __a;
+}
+
 /* vset_lane  */
 
 __extension__ extern __inline float16x4_t
@@ -5485,6 +5895,13 @@  vset_lane_p16 (poly16_t __elem, poly16x4_t __vec, const int __index)
   return __aarch64_vset_lane_any (__elem, __vec, __index);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_p64 (poly64_t __elem, poly64x1_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vset_lane_s8 (int8_t __elem, int8x8_t __vec, const int __index)
@@ -5571,9 +5988,16 @@  vsetq_lane_p8 (poly8_t __elem, poly8x16_t __vec, const int __index)
   return __aarch64_vset_lane_any (__elem, __vec, __index);
 }
 
-__extension__ extern __inline poly16x8_t
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_p16 (poly16_t __elem, poly16x8_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ extern __inline poly64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vsetq_lane_p16 (poly16_t __elem, poly16x8_t __vec, const int __index)
+vsetq_lane_p64 (poly64_t __elem, poly64x2_t __vec, const int __index)
 {
   return __aarch64_vset_lane_any (__elem, __vec, __index);
 }
@@ -5674,6 +6098,13 @@  vget_low_p16 (poly16x8_t __a)
   __GET_LOW (p16);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_p64 (poly64x2_t __a)
+{
+  __GET_LOW (p64);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vget_low_s8 (int8x16_t __a)
@@ -5772,6 +6203,13 @@  vget_high_p16 (poly16x8_t __a)
   __GET_HIGH (p16);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_p64 (poly64x2_t __a)
+{
+  __GET_HIGH (p64);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vget_high_s8 (int8x16_t __a)
@@ -5926,6 +6364,13 @@  vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
 						     (int16x4_t) __b);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+  return (poly64x2_t) __builtin_aarch64_combinedi_ppp (__a[0], __b[0]);
+}
+
 /* Start of temporary inline asm implementations.  */
 
 __extension__ extern __inline int8x8_t
@@ -10357,6 +10802,8 @@  __ST2_LANE_FUNC (poly8x8x2_t, poly8x16x2_t, poly8_t, v8qi, v16qi, qi, p8,
 		 int8x16_t)
 __ST2_LANE_FUNC (poly16x4x2_t, poly16x8x2_t, poly16_t, v4hi, v8hi, hi, p16,
 		 int16x8_t)
+__ST2_LANE_FUNC (poly64x1x2_t, poly64x2x2_t, poly64_t, di, v2di_ssps, di, p64,
+		 poly64x2_t)
 __ST2_LANE_FUNC (int8x8x2_t, int8x16x2_t, int8_t, v8qi, v16qi, qi, s8,
 		 int8x16_t)
 __ST2_LANE_FUNC (int16x4x2_t, int16x8x2_t, int16_t, v4hi, v8hi, hi, s16,
@@ -10392,6 +10839,7 @@  __ST2_LANE_FUNC (float32x4x2_t, float32_t, v4sf, sf, f32)
 __ST2_LANE_FUNC (float64x2x2_t, float64_t, v2df, df, f64)
 __ST2_LANE_FUNC (poly8x16x2_t, poly8_t, v16qi, qi, p8)
 __ST2_LANE_FUNC (poly16x8x2_t, poly16_t, v8hi, hi, p16)
+__ST2_LANE_FUNC (poly64x2x2_t, poly64_t, v2di, di, p64)
 __ST2_LANE_FUNC (int8x16x2_t, int8_t, v16qi, qi, s8)
 __ST2_LANE_FUNC (int16x8x2_t, int16_t, v8hi, hi, s16)
 __ST2_LANE_FUNC (int32x4x2_t, int32_t, v4si, si, s32)
@@ -10439,6 +10887,8 @@  __ST3_LANE_FUNC (poly8x8x3_t, poly8x16x3_t, poly8_t, v8qi, v16qi, qi, p8,
 		 int8x16_t)
 __ST3_LANE_FUNC (poly16x4x3_t, poly16x8x3_t, poly16_t, v4hi, v8hi, hi, p16,
 		 int16x8_t)
+__ST3_LANE_FUNC (poly64x1x3_t, poly64x2x3_t, poly64_t, di, v2di_ssps, di, p64,
+		 poly64x2_t)
 __ST3_LANE_FUNC (int8x8x3_t, int8x16x3_t, int8_t, v8qi, v16qi, qi, s8,
 		 int8x16_t)
 __ST3_LANE_FUNC (int16x4x3_t, int16x8x3_t, int16_t, v4hi, v8hi, hi, s16,
@@ -10474,6 +10924,7 @@  __ST3_LANE_FUNC (float32x4x3_t, float32_t, v4sf, sf, f32)
 __ST3_LANE_FUNC (float64x2x3_t, float64_t, v2df, df, f64)
 __ST3_LANE_FUNC (poly8x16x3_t, poly8_t, v16qi, qi, p8)
 __ST3_LANE_FUNC (poly16x8x3_t, poly16_t, v8hi, hi, p16)
+__ST3_LANE_FUNC (poly64x2x3_t, poly64_t, v2di, di, p64)
 __ST3_LANE_FUNC (int8x16x3_t, int8_t, v16qi, qi, s8)
 __ST3_LANE_FUNC (int16x8x3_t, int16_t, v8hi, hi, s16)
 __ST3_LANE_FUNC (int32x4x3_t, int32_t, v4si, si, s32)
@@ -10526,6 +10977,8 @@  __ST4_LANE_FUNC (poly8x8x4_t, poly8x16x4_t, poly8_t, v8qi, v16qi, qi, p8,
 		 int8x16_t)
 __ST4_LANE_FUNC (poly16x4x4_t, poly16x8x4_t, poly16_t, v4hi, v8hi, hi, p16,
 		 int16x8_t)
+__ST4_LANE_FUNC (poly64x1x4_t, poly64x2x4_t, poly64_t, di, v2di_ssps, di, p64,
+		 poly64x2_t)
 __ST4_LANE_FUNC (int8x8x4_t, int8x16x4_t, int8_t, v8qi, v16qi, qi, s8,
 		 int8x16_t)
 __ST4_LANE_FUNC (int16x4x4_t, int16x8x4_t, int16_t, v4hi, v8hi, hi, s16,
@@ -10561,6 +11014,7 @@  __ST4_LANE_FUNC (float32x4x4_t, float32_t, v4sf, sf, f32)
 __ST4_LANE_FUNC (float64x2x4_t, float64_t, v2df, df, f64)
 __ST4_LANE_FUNC (poly8x16x4_t, poly8_t, v16qi, qi, p8)
 __ST4_LANE_FUNC (poly16x8x4_t, poly16_t, v8hi, hi, p16)
+__ST4_LANE_FUNC (poly64x2x4_t, poly64_t, v2di, di, p64)
 __ST4_LANE_FUNC (int8x16x4_t, int8_t, v16qi, qi, s8)
 __ST4_LANE_FUNC (int16x8x4_t, int16_t, v8hi, hi, s16)
 __ST4_LANE_FUNC (int32x4x4_t, int32_t, v4si, si, s32)
@@ -13644,6 +14098,15 @@  vcopy_lane_p16 (poly16x4_t __a, const int __lane1,
 				  __a, __lane1);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_p64 (poly64x1_t __a, const int __lane1,
+		poly64x1_t __b, const int __lane2)
+{
+  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+				  __a, __lane1);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vcopy_lane_s8 (int8x8_t __a, const int __lane1,
@@ -13754,6 +14217,15 @@  vcopy_laneq_p16 (poly16x4_t __a, const int __lane1,
 				  __a, __lane1);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_p64 (poly64x1_t __a, const int __lane1,
+		 poly64x2_t __b, const int __lane2)
+{
+  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+				  __a, __lane1);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vcopy_laneq_s8 (int8x8_t __a, const int __lane1,
@@ -13864,6 +14336,15 @@  vcopyq_lane_p16 (poly16x8_t __a, const int __lane1,
 				   __a, __lane1);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_p64 (poly64x2_t __a, const int __lane1,
+		 poly64x1_t __b, const int __lane2)
+{
+  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+				   __a, __lane1);
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vcopyq_lane_s8 (int8x16_t __a, const int __lane1,
@@ -13974,6 +14455,15 @@  vcopyq_laneq_p16 (poly16x8_t __a, const int __lane1,
 				   __a, __lane1);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_p64 (poly64x2_t __a, const int __lane1,
+		  poly64x2_t __b, const int __lane2)
+{
+  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+				   __a, __lane1);
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vcopyq_laneq_s8 (int8x16_t __a, const int __lane1,
@@ -14836,6 +15326,13 @@  vdup_n_p16 (poly16_t __a)
   return (poly16x4_t) {__a, __a, __a, __a};
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_p64 (poly64_t __a)
+{
+  return (poly64x1_t) {__a};
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vdup_n_s8 (int8_t __a)
@@ -14930,6 +15427,13 @@  vdupq_n_p16 (uint32_t __a)
   return (poly16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_p64 (uint64_t __a)
+{
+  return (poly64x2_t) {__a, __a};
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vdupq_n_s8 (int32_t __a)
@@ -15025,6 +15529,13 @@  vdup_lane_p16 (poly16x4_t __a, const int __b)
   return __aarch64_vdup_lane_p16 (__a, __b);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_p64 (poly64x1_t __a, const int __b)
+{
+  return __aarch64_vdup_lane_p64 (__a, __b);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vdup_lane_s8 (int8x8_t __a, const int __b)
@@ -15118,6 +15629,13 @@  vdup_laneq_p16 (poly16x8_t __a, const int __b)
   return __aarch64_vdup_laneq_p16 (__a, __b);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_laneq_p64 (poly64x2_t __a, const int __b)
+{
+  return __aarch64_vdup_laneq_p64 (__a, __b);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vdup_laneq_s8 (int8x16_t __a, const int __b)
@@ -15211,6 +15729,13 @@  vdupq_lane_p16 (poly16x4_t __a, const int __b)
   return __aarch64_vdupq_lane_p16 (__a, __b);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_p64 (poly64x1_t __a, const int __b)
+{
+  return __aarch64_vdupq_lane_p64 (__a, __b);
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vdupq_lane_s8 (int8x8_t __a, const int __b)
@@ -15304,6 +15829,13 @@  vdupq_laneq_p16 (poly16x8_t __a, const int __b)
   return __aarch64_vdupq_laneq_p16 (__a, __b);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_laneq_p64 (poly64x2_t __a, const int __b)
+{
+  return __aarch64_vdupq_laneq_p64 (__a, __b);
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vdupq_laneq_s8 (int8x16_t __a, const int __b)
@@ -16283,6 +16815,13 @@  vld1_p16 (const poly16_t *a)
     __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_p64 (const poly64_t *a)
+{
+  return (poly64x1_t) {*a};
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1_s8 (const int8_t *a)
@@ -16381,6 +16920,14 @@  vld1q_p16 (const poly16_t *a)
     __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p64 (const poly64_t *a)
+{
+  return (poly64x2_t)
+    __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) a);
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1q_s8 (const int8_t *a)
@@ -16478,6 +17025,13 @@  vld1_dup_p16 (const poly16_t* __a)
   return vdup_n_p16 (*__a);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_p64 (const poly64_t* __a)
+{
+  return vdup_n_p64 (*__a);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1_dup_s8 (const int8_t* __a)
@@ -16571,7 +17125,14 @@  vld1q_dup_p16 (const poly16_t* __a)
   return vdupq_n_p16 (*__a);
 }
 
-__extension__ extern __inline int8x16_t
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_p64 (const poly64_t* __a)
+{
+  return vdupq_n_p64 (*__a);
+}
+
+ __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1q_dup_s8 (const int8_t* __a)
 {
@@ -16664,6 +17225,13 @@  vld1_lane_p16 (const poly16_t *__src, poly16x4_t __vec, const int __lane)
   return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_p64 (const poly64_t *__src, poly64x1_t __vec, const int __lane)
+{
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1_lane_s8 (const int8_t *__src, int8x8_t __vec, const int __lane)
@@ -16757,6 +17325,13 @@  vld1q_lane_p16 (const poly16_t *__src, poly16x8_t __vec, const int __lane)
   return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_p64 (const poly64_t *__src, poly64x2_t __vec, const int __lane)
+{
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld1q_lane_s8 (const int8_t *__src, int8x16_t __vec, const int __lane)
@@ -16875,6 +17450,18 @@  vld2_p8 (const poly8_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_p64 (const poly64_t * __a)
+{
+  poly64x1x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 0);
+  ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 1);
+  return ret;
+}
+
 __extension__ extern __inline int16x4x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld2_s16 (const int16_t * __a)
@@ -17019,6 +17606,18 @@  vld2q_p16 (const poly16_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_p64 (const poly64_t * __a)
+{
+  poly64x2x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 0);
+  ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 1);
+  return ret;
+}
+
 __extension__ extern __inline int32x4x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld2q_s32 (const int32_t * __a)
@@ -17296,6 +17895,19 @@  vld3_f32 (const float32_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_p64 (const poly64_t * __a)
+{
+  poly64x1x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 0);
+  ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 1);
+  ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 2);
+  return ret;
+}
+
 __extension__ extern __inline int8x16x3_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld3q_s8 (const int8_t * __a)
@@ -17465,6 +18077,19 @@  vld3q_f64 (const float64_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_p64 (const poly64_t * __a)
+{
+  poly64x2x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 0);
+  ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 1);
+  ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 2);
+  return ret;
+}
+
 __extension__ extern __inline int64x1x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld4_s64 (const int64_t * __a)
@@ -17647,6 +18272,20 @@  vld4_f32 (const float32_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_p64 (const poly64_t * __a)
+{
+  poly64x1x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 0);
+  ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 1);
+  ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 2);
+  ret.val[3] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 3);
+  return ret;
+}
+
 __extension__ extern __inline int8x16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld4q_s8 (const int8_t * __a)
@@ -17829,6 +18468,20 @@  vld4q_f64 (const float64_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_p64 (const poly64_t * __a)
+{
+  poly64x2x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 0);
+  ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 1);
+  ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 2);
+  ret.val[3] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 3);
+  return ret;
+}
+
 /* vldn_dup */
 
 __extension__ extern __inline int8x8x2_t
@@ -17963,6 +18616,18 @@  vld2_dup_p16 (const poly16_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_p64 (const poly64_t * __a)
+{
+  poly64x1x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 0);
+  ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 1);
+  return ret;
+}
+
 __extension__ extern __inline int64x1x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld2_dup_s64 (const int64_t * __a)
@@ -18143,6 +18809,18 @@  vld2q_dup_f64 (const float64_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_dup_p64 (const poly64_t * __a)
+{
+  poly64x2x2_t ret;
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 0);
+  ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 1);
+  return ret;
+}
+
 __extension__ extern __inline int64x1x3_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld3_dup_s64 (const int64_t * __a)
@@ -18312,6 +18990,19 @@  vld3_dup_f32 (const float32_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_p64 (const poly64_t * __a)
+{
+  poly64x1x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 0);
+  ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 1);
+  ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 2);
+  return ret;
+}
+
 __extension__ extern __inline int8x16x3_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld3q_dup_s8 (const int8_t * __a)
@@ -18481,6 +19172,19 @@  vld3q_dup_f64 (const float64_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_dup_p64 (const poly64_t * __a)
+{
+  poly64x2x3_t ret;
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 0);
+  ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 1);
+  ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 2);
+  return ret;
+}
+
 __extension__ extern __inline int64x1x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld4_dup_s64 (const int64_t * __a)
@@ -18663,6 +19367,20 @@  vld4_dup_f32 (const float32_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_p64 (const poly64_t * __a)
+{
+  poly64x1x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 0);
+  ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 1);
+  ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 2);
+  ret.val[3] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 3);
+  return ret;
+}
+
 __extension__ extern __inline int8x16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vld4q_dup_s8 (const int8_t * __a)
@@ -18845,6 +19563,20 @@  vld4q_dup_f64 (const float64_t * __a)
   return ret;
 }
 
+__extension__ extern __inline poly64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_dup_p64 (const poly64_t * __a)
+{
+  poly64x2x4_t ret;
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a);
+  ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 0);
+  ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 1);
+  ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 2);
+  ret.val[3] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 3);
+  return ret;
+}
+
 /* vld2_lane */
 
 #define __LD2_LANE_FUNC(intype, vectype, largetype, ptrtype, mode,	   \
@@ -18882,6 +19614,8 @@  __LD2_LANE_FUNC (poly8x8x2_t, poly8x8_t, poly8x16x2_t, poly8_t, v8qi, v16qi, qi,
 		 int8x16_t)
 __LD2_LANE_FUNC (poly16x4x2_t, poly16x4_t, poly16x8x2_t, poly16_t, v4hi, v8hi, hi,
 		 p16, int16x8_t)
+__LD2_LANE_FUNC (poly64x1x2_t, poly64x1_t, poly64x2x2_t, poly64_t, di,
+		 v2di_ssps, di, p64, poly64x2_t)
 __LD2_LANE_FUNC (int8x8x2_t, int8x8_t, int8x16x2_t, int8_t, v8qi, v16qi, qi, s8,
 		 int8x16_t)
 __LD2_LANE_FUNC (int16x4x2_t, int16x4_t, int16x8x2_t, int16_t, v4hi, v8hi, hi, s16,
@@ -18924,6 +19658,7 @@  __LD2_LANE_FUNC (float32x4x2_t, float32x4_t, float32_t, v4sf, sf, f32)
 __LD2_LANE_FUNC (float64x2x2_t, float64x2_t, float64_t, v2df, df, f64)
 __LD2_LANE_FUNC (poly8x16x2_t, poly8x16_t, poly8_t, v16qi, qi, p8)
 __LD2_LANE_FUNC (poly16x8x2_t, poly16x8_t, poly16_t, v8hi, hi, p16)
+__LD2_LANE_FUNC (poly64x2x2_t, poly64x2_t, poly64_t, v2di, di, p64)
 __LD2_LANE_FUNC (int8x16x2_t, int8x16_t, int8_t, v16qi, qi, s8)
 __LD2_LANE_FUNC (int16x8x2_t, int16x8_t, int16_t, v8hi, hi, s16)
 __LD2_LANE_FUNC (int32x4x2_t, int32x4_t, int32_t, v4si, si, s32)
@@ -18978,6 +19713,8 @@  __LD3_LANE_FUNC (poly8x8x3_t, poly8x8_t, poly8x16x3_t, poly8_t, v8qi, v16qi, qi,
 		 int8x16_t)
 __LD3_LANE_FUNC (poly16x4x3_t, poly16x4_t, poly16x8x3_t, poly16_t, v4hi, v8hi, hi,
 		 p16, int16x8_t)
+__LD3_LANE_FUNC (poly64x1x3_t, poly64x1_t, poly64x2x3_t, poly64_t, di,
+		 v2di_ssps, di, p64, poly64x2_t)
 __LD3_LANE_FUNC (int8x8x3_t, int8x8_t, int8x16x3_t, int8_t, v8qi, v16qi, qi, s8,
 		 int8x16_t)
 __LD3_LANE_FUNC (int16x4x3_t, int16x4_t, int16x8x3_t, int16_t, v4hi, v8hi, hi, s16,
@@ -19022,6 +19759,7 @@  __LD3_LANE_FUNC (float32x4x3_t, float32x4_t, float32_t, v4sf, sf, f32)
 __LD3_LANE_FUNC (float64x2x3_t, float64x2_t, float64_t, v2df, df, f64)
 __LD3_LANE_FUNC (poly8x16x3_t, poly8x16_t, poly8_t, v16qi, qi, p8)
 __LD3_LANE_FUNC (poly16x8x3_t, poly16x8_t, poly16_t, v8hi, hi, p16)
+__LD3_LANE_FUNC (poly64x2x3_t, poly64x2_t, poly64_t, v2di, di, p64)
 __LD3_LANE_FUNC (int8x16x3_t, int8x16_t, int8_t, v16qi, qi, s8)
 __LD3_LANE_FUNC (int16x8x3_t, int16x8_t, int16_t, v8hi, hi, s16)
 __LD3_LANE_FUNC (int32x4x3_t, int32x4_t, int32_t, v4si, si, s32)
@@ -19084,6 +19822,8 @@  __LD4_LANE_FUNC (poly8x8x4_t, poly8x8_t, poly8x16x4_t, poly8_t, v8qi, v16qi, qi,
 		 int8x16_t)
 __LD4_LANE_FUNC (poly16x4x4_t, poly16x4_t, poly16x8x4_t, poly16_t, v4hi, v8hi, hi,
 		 p16, int16x8_t)
+__LD4_LANE_FUNC (poly64x1x4_t, poly64x1_t, poly64x2x4_t, poly64_t, di,
+		 v2di_ssps, di, p64, poly64x2_t)
 __LD4_LANE_FUNC (int8x8x4_t, int8x8_t, int8x16x4_t, int8_t, v8qi, v16qi, qi, s8,
 		 int8x16_t)
 __LD4_LANE_FUNC (int16x4x4_t, int16x4_t, int16x8x4_t, int16_t, v4hi, v8hi, hi, s16,
@@ -19130,6 +19870,7 @@  __LD4_LANE_FUNC (float32x4x4_t, float32x4_t, float32_t, v4sf, sf, f32)
 __LD4_LANE_FUNC (float64x2x4_t, float64x2_t, float64_t, v2df, df, f64)
 __LD4_LANE_FUNC (poly8x16x4_t, poly8x16_t, poly8_t, v16qi, qi, p8)
 __LD4_LANE_FUNC (poly16x8x4_t, poly16x8_t, poly16_t, v8hi, hi, p16)
+__LD4_LANE_FUNC (poly64x2x4_t, poly64x2_t, poly64_t, v2di, di, p64)
 __LD4_LANE_FUNC (int8x16x4_t, int8x16_t, int8_t, v16qi, qi, s8)
 __LD4_LANE_FUNC (int16x8x4_t, int16x8_t, int16_t, v8hi, hi, s16)
 __LD4_LANE_FUNC (int32x4x4_t, int32x4_t, int32_t, v4si, si, s32)
@@ -20596,6 +21337,13 @@  vmov_n_p16 (poly16_t __a)
   return vdup_n_p16 (__a);
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_p64 (poly64_t __a)
+{
+  return vdup_n_p64 (__a);
+}
+
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmov_n_s8 (int8_t __a)
@@ -20687,6 +21435,13 @@  vmovq_n_p16 (poly16_t __a)
   return vdupq_n_p16 (__a);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_p64 (poly64_t __a)
+{
+  return vdupq_n_p64 (__a);
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmovq_n_s8 (int8_t __a)
@@ -25275,6 +26030,13 @@  vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
   return (uint64x1_t) {__builtin_aarch64_usli_ndi_uuus (__a[0], __b[0], __c)};
 }
 
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+  return (poly64x1_t) {__builtin_aarch64_ssli_ndi_ppps (__a[0], __b[0], __c)};
+}
+
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
@@ -25331,6 +26093,13 @@  vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
   return __builtin_aarch64_usli_nv2di_uuus (__a, __b, __c);
 }
 
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+  return __builtin_aarch64_ssli_nv2di_ppps (__a, __b, __c);
+}
+
 __extension__ extern __inline int64_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vslid_n_s64 (int64_t __a, int64_t __b, const int __c)
@@ -25757,6 +26526,13 @@  vst1_p16 (poly16_t *a, poly16x4_t b)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_p64 (poly64_t *a, poly64x1_t b)
+{
+  *a = b[0];
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst1_s8 (int8_t *a, int8x8_t b)
 {
   __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a, b);
@@ -25855,6 +26631,14 @@  vst1q_p16 (poly16_t *a, poly16x8_t b)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_p64 (poly64_t *a, poly64x2_t b)
+{
+  __builtin_aarch64_st1v2di_sp ((__builtin_aarch64_simd_di *) a,
+				(poly64x2_t) b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst1q_s8 (int8_t *a, int8x16_t b)
 {
   __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a, b);
@@ -25952,6 +26736,13 @@  vst1_lane_p16 (poly16_t *__a, poly16x4_t __b, const int __lane)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_p64 (poly64_t *__a, poly64x1_t __b, const int __lane)
+{
+  *__a = __aarch64_vget_lane_any (__b, __lane);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst1_lane_s8 (int8_t *__a, int8x8_t __b, const int __lane)
 {
   *__a = __aarch64_vget_lane_any (__b, __lane);
@@ -26045,6 +26836,13 @@  vst1q_lane_p16 (poly16_t *__a, poly16x8_t __b, const int __lane)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_p64 (poly64_t *__a, poly64x2_t __b, const int __lane)
+{
+  *__a = __aarch64_vget_lane_any (__b, __lane);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst1q_lane_s8 (int8_t *__a, int8x16_t __b, const int __lane)
 {
   *__a = __aarch64_vget_lane_any (__b, __lane);
@@ -26272,6 +27070,21 @@  vst2_f32 (float32_t * __a, float32x2x2_t val)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_p64 (poly64_t * __a, poly64x1x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  poly64x2x2_t temp;
+  temp.val[0] = vcombine_p64 (val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p64 (val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
+					       (poly64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
+					       (poly64x2_t) temp.val[1], 1);
+  __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst2q_s8 (int8_t * __a, int8x16x2_t val)
 {
   __builtin_aarch64_simd_oi __o;
@@ -26402,6 +27215,18 @@  vst2q_f64 (float64_t * __a, float64x2x2_t val)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_p64 (poly64_t * __a, poly64x2x2_t val)
+{
+  __builtin_aarch64_simd_oi __o;
+  __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
+					       (poly64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
+					       (poly64x2_t) val.val[1], 1);
+  __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst3_s64 (int64_t * __a, int64x1x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
@@ -26597,6 +27422,24 @@  vst3_f32 (float32_t * __a, float32x2x3_t val)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_p64 (poly64_t * __a, poly64x1x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  poly64x2x3_t temp;
+  temp.val[0] = vcombine_p64 (val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p64 (val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_p64 (val.val[2], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+					       (poly64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+					       (poly64x2_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+					       (poly64x2_t) temp.val[2], 2);
+  __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst3q_s8 (int8_t * __a, int8x16x3_t val)
 {
   __builtin_aarch64_simd_ci __o;
@@ -26740,6 +27583,20 @@  vst3q_f64 (float64_t * __a, float64x2x3_t val)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_p64 (poly64_t * __a, poly64x2x3_t val)
+{
+  __builtin_aarch64_simd_ci __o;
+  __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+					       (poly64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+					       (poly64x2_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+					       (poly64x2_t) val.val[2], 2);
+  __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst4_s64 (int64_t * __a, int64x1x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
@@ -26961,6 +27818,27 @@  vst4_f32 (float32_t * __a, float32x2x4_t val)
 
 __extension__ extern __inline void
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_p64 (poly64_t * __a, poly64x1x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  poly64x2x4_t temp;
+  temp.val[0] = vcombine_p64 (val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  temp.val[1] = vcombine_p64 (val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  temp.val[2] = vcombine_p64 (val.val[2], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  temp.val[3] = vcombine_p64 (val.val[3], vcreate_p64 (__AARCH64_UINT64_C (0)));
+  __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+					       (poly64x2_t) temp.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+					       (poly64x2_t) temp.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+					       (poly64x2_t) temp.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+					       (poly64x2_t) temp.val[3], 3);
+  __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vst4q_s8 (int8_t * __a, int8x16x4_t val)
 {
   __builtin_aarch64_simd_xi __o;
@@ -27115,6 +27993,22 @@  vst4q_f64 (float64_t * __a, float64x2x4_t val)
   __builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o);
 }
 
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_p64 (poly64_t * __a, poly64x2x4_t val)
+{
+  __builtin_aarch64_simd_xi __o;
+  __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+					       (poly64x2_t) val.val[0], 0);
+  __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+					       (poly64x2_t) val.val[1], 1);
+  __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+					       (poly64x2_t) val.val[2], 2);
+  __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+					       (poly64x2_t) val.val[3], 3);
+  __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
 /* vsub */
 
 __extension__ extern __inline int64_t