/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_NEON_H
#define __ARM_NEON_H

#ifndef __ARM_FP
#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
#else

#if !defined(__ARM_NEON)
#error "NEON support not enabled"
#else

#include <stdint.h>

#ifdef __ARM_FEATURE_BF16
#include <arm_bf16.h>
typedef __bf16 bfloat16_t;
#endif

typedef float float32_t;
typedef __fp16 float16_t;
#ifdef __aarch64__
typedef double float64_t;
#endif

#ifdef __aarch64__
typedef uint8_t poly8_t;
typedef uint16_t poly16_t;
typedef uint64_t poly64_t;
typedef __uint128_t poly128_t;
#else
typedef int8_t poly8_t;
typedef int16_t poly16_t;
typedef int64_t poly64_t;
#endif

typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
#ifdef __aarch64__
typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
#endif

typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
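/* Usage sketch (illustrative only; the function name below is hypothetical):
 * the vector typedefs above are Clang extended vector types, so they support
 * elementwise arithmetic and __builtin_shufflevector directly, which are the
 * same idioms the intrinsic definitions later in this header are built from. */
static __inline__ __attribute__((__always_inline__, __nodebug__, __unused__))
int32x4_t __neon_example_add_reversed(int32x4_t __a, int32x4_t __b) {
  int32x4_t __sum = __a + __b;                              /* lane-wise add */
  return __builtin_shufflevector(__sum, __sum, 3, 2, 1, 0); /* reverse lanes */
}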
typedef struct int8x8x2_t { int8x8_t val[2]; } int8x8x2_t;
typedef struct int8x16x2_t { int8x16_t val[2]; } int8x16x2_t;
typedef struct int16x4x2_t { int16x4_t val[2]; } int16x4x2_t;
typedef struct int16x8x2_t { int16x8_t val[2]; } int16x8x2_t;
typedef struct int32x2x2_t { int32x2_t val[2]; } int32x2x2_t;
typedef struct int32x4x2_t { int32x4_t val[2]; } int32x4x2_t;
typedef struct int64x1x2_t { int64x1_t val[2]; } int64x1x2_t;
typedef struct int64x2x2_t { int64x2_t val[2]; } int64x2x2_t;
typedef struct uint8x8x2_t { uint8x8_t val[2]; } uint8x8x2_t;
typedef struct uint8x16x2_t { uint8x16_t val[2]; } uint8x16x2_t;
typedef struct uint16x4x2_t { uint16x4_t val[2]; } uint16x4x2_t;
typedef struct uint16x8x2_t { uint16x8_t val[2]; } uint16x8x2_t;
typedef struct uint32x2x2_t { uint32x2_t val[2]; } uint32x2x2_t;
typedef struct uint32x4x2_t { uint32x4_t val[2]; } uint32x4x2_t;
typedef struct uint64x1x2_t { uint64x1_t val[2]; } uint64x1x2_t;
typedef struct uint64x2x2_t { uint64x2_t val[2]; } uint64x2x2_t;
typedef struct float16x4x2_t { float16x4_t val[2]; } float16x4x2_t;
typedef struct float16x8x2_t { float16x8_t val[2]; } float16x8x2_t;
typedef struct float32x2x2_t { float32x2_t val[2]; } float32x2x2_t;
typedef struct float32x4x2_t { float32x4_t val[2]; } float32x4x2_t;
#ifdef __aarch64__
typedef struct float64x1x2_t { float64x1_t val[2]; } float64x1x2_t;
typedef struct float64x2x2_t { float64x2_t val[2]; } float64x2x2_t;
#endif
typedef struct poly8x8x2_t { poly8x8_t val[2]; } poly8x8x2_t;
typedef struct poly8x16x2_t { poly8x16_t val[2]; } poly8x16x2_t;
typedef struct poly16x4x2_t { poly16x4_t val[2]; } poly16x4x2_t;
typedef struct poly16x8x2_t { poly16x8_t val[2]; } poly16x8x2_t;
typedef struct poly64x1x2_t { poly64x1_t val[2]; } poly64x1x2_t;
typedef struct poly64x2x2_t { poly64x2_t val[2]; } poly64x2x2_t;
typedef struct int8x8x3_t { int8x8_t val[3]; } int8x8x3_t;
typedef struct int8x16x3_t { int8x16_t val[3]; } int8x16x3_t;
typedef struct int16x4x3_t { int16x4_t val[3]; } int16x4x3_t;
typedef struct int16x8x3_t { int16x8_t val[3]; } int16x8x3_t;
typedef struct int32x2x3_t { int32x2_t val[3]; } int32x2x3_t;
typedef struct int32x4x3_t { int32x4_t val[3]; } int32x4x3_t;
typedef struct int64x1x3_t { int64x1_t val[3]; } int64x1x3_t;
typedef struct int64x2x3_t { int64x2_t val[3]; } int64x2x3_t;
typedef struct uint8x8x3_t { uint8x8_t val[3]; } uint8x8x3_t;
typedef struct uint8x16x3_t { uint8x16_t val[3]; } uint8x16x3_t;
typedef struct uint16x4x3_t { uint16x4_t val[3]; } uint16x4x3_t;
typedef struct uint16x8x3_t { uint16x8_t val[3]; } uint16x8x3_t;
typedef struct uint32x2x3_t { uint32x2_t val[3]; } uint32x2x3_t;
typedef struct uint32x4x3_t { uint32x4_t val[3]; } uint32x4x3_t;
typedef struct uint64x1x3_t { uint64x1_t val[3]; } uint64x1x3_t;
typedef struct uint64x2x3_t { uint64x2_t val[3]; } uint64x2x3_t;
typedef struct float16x4x3_t { float16x4_t val[3]; } float16x4x3_t;
typedef struct float16x8x3_t { float16x8_t val[3]; } float16x8x3_t;
typedef struct float32x2x3_t { float32x2_t val[3]; } float32x2x3_t;
typedef struct float32x4x3_t { float32x4_t val[3]; } float32x4x3_t;
#ifdef __aarch64__
typedef struct float64x1x3_t { float64x1_t val[3]; } float64x1x3_t;
typedef struct float64x2x3_t { float64x2_t val[3]; } float64x2x3_t;
#endif
typedef struct poly8x8x3_t { poly8x8_t val[3]; } poly8x8x3_t;
typedef struct
poly8x16x3_t { poly8x16_t val[3]; } poly8x16x3_t; typedef struct poly16x4x3_t { poly16x4_t val[3]; } poly16x4x3_t; typedef struct poly16x8x3_t { poly16x8_t val[3]; } poly16x8x3_t; typedef struct poly64x1x3_t { poly64x1_t val[3]; } poly64x1x3_t; typedef struct poly64x2x3_t { poly64x2_t val[3]; } poly64x2x3_t; typedef struct int8x8x4_t { int8x8_t val[4]; } int8x8x4_t; typedef struct int8x16x4_t { int8x16_t val[4]; } int8x16x4_t; typedef struct int16x4x4_t { int16x4_t val[4]; } int16x4x4_t; typedef struct int16x8x4_t { int16x8_t val[4]; } int16x8x4_t; typedef struct int32x2x4_t { int32x2_t val[4]; } int32x2x4_t; typedef struct int32x4x4_t { int32x4_t val[4]; } int32x4x4_t; typedef struct int64x1x4_t { int64x1_t val[4]; } int64x1x4_t; typedef struct int64x2x4_t { int64x2_t val[4]; } int64x2x4_t; typedef struct uint8x8x4_t { uint8x8_t val[4]; } uint8x8x4_t; typedef struct uint8x16x4_t { uint8x16_t val[4]; } uint8x16x4_t; typedef struct uint16x4x4_t { uint16x4_t val[4]; } uint16x4x4_t; typedef struct uint16x8x4_t { uint16x8_t val[4]; } uint16x8x4_t; typedef struct uint32x2x4_t { uint32x2_t val[4]; } uint32x2x4_t; typedef struct uint32x4x4_t { uint32x4_t val[4]; } uint32x4x4_t; typedef struct uint64x1x4_t { uint64x1_t val[4]; } uint64x1x4_t; typedef struct uint64x2x4_t { uint64x2_t val[4]; } uint64x2x4_t; typedef struct float16x4x4_t { float16x4_t val[4]; } float16x4x4_t; typedef struct float16x8x4_t { float16x8_t val[4]; } float16x8x4_t; typedef struct float32x2x4_t { float32x2_t val[4]; } float32x2x4_t; typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t; #ifdef __aarch64__ typedef struct float64x1x4_t { float64x1_t val[4]; } float64x1x4_t; typedef struct float64x2x4_t { float64x2_t val[4]; } float64x2x4_t; #endif typedef struct poly8x8x4_t { poly8x8_t val[4]; } poly8x8x4_t; typedef struct poly8x16x4_t { poly8x16_t val[4]; } poly8x16x4_t; typedef struct poly16x4x4_t { poly16x4_t val[4]; } poly16x4x4_t; typedef struct poly16x8x4_t { poly16x8_t val[4]; } poly16x8x4_t; typedef struct poly64x1x4_t { poly64x1_t val[4]; } poly64x1x4_t; typedef struct poly64x2x4_t { poly64x2_t val[4]; } poly64x2x4_t; #ifdef __ARM_FEATURE_BF16 typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t; typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t; typedef struct bfloat16x4x2_t { bfloat16x4_t val[2]; } bfloat16x4x2_t; typedef struct bfloat16x8x2_t { bfloat16x8_t val[2]; } bfloat16x8x2_t; typedef struct bfloat16x4x3_t { bfloat16x4_t val[3]; } bfloat16x4x3_t; typedef struct bfloat16x8x3_t { bfloat16x8_t val[3]; } bfloat16x8x3_t; typedef struct bfloat16x4x4_t { bfloat16x4_t val[4]; } bfloat16x4x4_t; typedef struct bfloat16x8x4_t { bfloat16x8_t val[4]; } bfloat16x8x4_t; #endif #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) #ifdef __LITTLE_ENDIAN__ #define splat_lane_p8(__p0, __p1) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ __ret; \ }) #else #define splat_lane_p8(__p0, __p1) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, 
__p1, 4); \ __ret; \ }) #endif #define splat_lane_p64(__p0, __p1) __extension__ ({ \ poly64x1_t __ret; \ poly64x1_t __s0 = __p0; \ __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define splat_lane_p16(__p0, __p1) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ __ret; \ }) #else #define splat_lane_p16(__p0, __p1) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_p8(__p0, __p1) __extension__ ({ \ poly8x16_t __ret; \ poly8x8_t __s0 = __p0; \ __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ __ret; \ }) #else #define splatq_lane_p8(__p0, __p1) __extension__ ({ \ poly8x16_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \ poly8x16_t __ret; \ poly8x8_t __s0 = __p0; \ __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_p64(__p0, __p1) __extension__ ({ \ poly64x2_t __ret; \ poly64x1_t __s0 = __p0; \ __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ __ret; \ }) #else #define splatq_lane_p64(__p0, __p1) __extension__ ({ \ poly64x2_t __ret; \ poly64x1_t __s0 = __p0; \ __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \ poly64x2_t __ret; \ poly64x1_t __s0 = __p0; \ __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_p16(__p0, __p1) __extension__ ({ \ poly16x8_t __ret; \ poly16x4_t __s0 = __p0; \ __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ __ret; \ }) #else #define splatq_lane_p16(__p0, __p1) __extension__ ({ \ poly16x8_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \ poly16x8_t __ret; \ poly16x4_t __s0 = __p0; \ __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else #define splatq_lane_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x8_t __s0 = __p0; \ 
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define splatq_lane_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #else #define splatq_lane_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define splatq_lane_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #else #define splatq_lane_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define 
splatq_lane_f64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ float64x1_t __s0 = __p0; \ __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ __ret; \ }) #else #define splatq_lane_f64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ float64x1_t __s0 = __p0; \ __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ float64x1_t __s0 = __p0; \ __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_f32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ __ret; \ }) #else #define splatq_lane_f32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ __ret; \ }) #else #define splatq_lane_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else #define splatq_lane_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #else #define splatq_lane_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64x2_t) 
__builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_lane_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else #define splatq_lane_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_lane_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else #define splat_lane_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_lane_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define splat_lane_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #endif #define splat_lane_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define splat_lane_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define splat_lane_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_lane_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #else #define splat_lane_s8(__p0, __p1) 
__extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #endif #define splat_lane_f64(__p0, __p1) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define splat_lane_f32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ __ret; \ }) #else #define splat_lane_f32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_lane_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ __ret; \ }) #else #define splat_lane_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_lane_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else #define splat_lane_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #endif #define splat_lane_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define splat_lane_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else #define splat_lane_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = 
(int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_p8(__p0, __p1) __extension__ ({ \ poly8x8_t __ret; \ poly8x16_t __s0 = __p0; \ __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ __ret; \ }) #else #define splat_laneq_p8(__p0, __p1) __extension__ ({ \ poly8x8_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \ poly8x8_t __ret; \ poly8x16_t __s0 = __p0; \ __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_p64(__p0, __p1) __extension__ ({ \ poly64x1_t __ret; \ poly64x2_t __s0 = __p0; \ __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ __ret; \ }) #else #define splat_laneq_p64(__p0, __p1) __extension__ ({ \ poly64x1_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \ __ret; \ }) #define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \ poly64x1_t __ret; \ poly64x2_t __s0 = __p0; \ __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_p16(__p0, __p1) __extension__ ({ \ poly16x4_t __ret; \ poly16x8_t __s0 = __p0; \ __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ __ret; \ }) #else #define splat_laneq_p16(__p0, __p1) __extension__ ({ \ poly16x4_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \ poly16x4_t __ret; \ poly16x8_t __s0 = __p0; \ __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ __ret; \ }) #else #define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_p64(__p0, 
__p1) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ __ret; \ }) #else #define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ __ret; \ }) #else #define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else #define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x2_t) 
__builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #else #define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ __ret; \ }) #else #define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ __ret; \ }) #else #define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ __ret; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ #define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ __ret; \ }) #else #define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else #define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else #define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else #define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else #define splat_laneq_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = 
(uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define splat_laneq_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define splat_laneq_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \ __ret; \ }) #define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define splat_laneq_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #else #define splat_laneq_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_f64(__p0, __p1) __extension__ ({ \ float64x1_t __ret; \ float64x2_t __s0 = __p0; \ __ret = 
(float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ __ret; \ }) #else #define splat_laneq_f64(__p0, __p1) __extension__ ({ \ float64x1_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \ __ret; \ }) #define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \ float64x1_t __ret; \ float64x2_t __s0 = __p0; \ __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_f32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ __ret; \ }) #else #define splat_laneq_f32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ __ret; \ }) #else #define splat_laneq_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else #define splat_laneq_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else #define splat_laneq_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \ __ret; \ }) #define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 
35); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else #define splat_laneq_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; 
} #else __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return 
__ret; } __ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); 
__ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vabsq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); return __ret; } #else __ai int8x16_t vabsq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vabsq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vabsq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vabsq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vabsq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vabsq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vabsq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vabs_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); return __ret; } #else __ai int8x8_t vabs_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai 
float32x2_t vabs_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vabs_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vabs_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vabs_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vabs_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vabs_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 + __p1; return __ret; } #else 
__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 + __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t 
vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 + __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif __ai poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) { poly64x1_t __ret; __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else __ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t 
__ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else __ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38); return __ret; } #else __ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else __ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #else __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #else __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret 
= (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #else __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #else __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #else __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #else __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #endif 
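/* The intrinsics below (vand/vbic/vbsl bitwise operations, the
 * vcage/vcagt/vcale/vcalt absolute-value compares, and the vceq/vcge
 * compares) follow the same pattern as the arithmetic forms above: on
 * little-endian targets the vector builtin is called directly, while on
 * big-endian targets the operands are lane-reversed with
 * __builtin_shufflevector before the call and the result is reversed
 * back, keeping user-visible lane numbering consistent across
 * endiannesses. The __noswap_* variants seen above skip that reversal
 * and appear to exist only for internal use by other intrinsics in this
 * header.
 *
 * Illustrative use of two of these intrinsics together (a sketch for a
 * separate translation unit that includes <arm_neon.h>; the function
 * name pick_equal is ours, not part of the header):
 *
 *   static inline float32x4_t pick_equal(float32x4_t a, float32x4_t b) {
 *     uint32x4_t mask = vceqq_f32(a, b); // all-ones lanes where a == b
 *     return vbslq_f32(mask, a, b);      // bitwise select: mask ? a : b
 *   }
 */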
#ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 
& __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 & __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 & __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 & __p1; return __ret; } #else __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); 
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai int64x2_t vbicq_s64(int64x2_t 
__p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 & ~__p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 & ~__p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { 
int16x4_t __ret; __ret = __p0 & ~__p1; return __ret; } #else __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { poly8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { poly16x4_t __ret; __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5); return __ret; } #else __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { poly16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); return __ret; } #else __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { poly8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { poly16x8_t __ret; __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37); return __ret; } #else __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { poly16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 
(int8x16_t)__rev2, 37); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 32); return __ret; } #else __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); return __ret; } #else __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 
1, 0); __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); return __ret; } #else __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17); return __ret; } #else __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { 
float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #else __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t 
vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 == __p1); return __ret; } #else __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { uint8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 == __p1); return __ret; } #else __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { uint8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 == __p1); return __ret; } #else __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 == __p1); return __ret; } #else __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 == __p1); return __ret; } #else __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 == __p1); return __ret; } #else __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 
8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 == __p1); return __ret; } #else __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 == __p1); return __ret; } #else __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 == __p1); return __ret; } #else __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 == __p1); return __ret; } #else __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 == __p1); return __ret; } #else __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 == __p1); return __ret; } #else __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 == __p1); return __ret; } #else __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 == __p1); return __ret; } #else __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 == __p1); return __ret; } #else __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 == __p1); return __ret; } #else __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 >= __p1); return __ret; } #else __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 >= __p1); return __ret; } #else __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 >= __p1); return __ret; } #else __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 >= 
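/* Illustrative sketch, not part of the generated header: the vceq/vceqq
 * intrinsics above are plain lane-wise equality tests; a result lane is all
 * ones when the corresponding input lanes compare equal and zero otherwise.
 * A hypothetical example with made-up vectors x and y:
 *
 *   uint32x4_t eq    = vceqq_f32(x, y);          // per-lane x[i] == y[i]
 *   uint32_t   lane0 = vgetq_lane_u32(eq, 0);    // 0xFFFFFFFF or 0
 */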
__p1); return __ret; } #else __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 >= __p1); return __ret; } #else __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 >= __p1); return __ret; } #else __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 >= __p1); return __ret; } #else __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 >= __p1); return __ret; } #else __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 >= __p1); return __ret; } #else __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 >= __p1); return __ret; } #else __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 
>= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 >= __p1); return __ret; } #else __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 >= __p1); return __ret; } #else __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 >= __p1); return __ret; } #else __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 >= __p1); return __ret; } #else __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 > __p1); return __ret; } #else __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 > __p1); return __ret; } #else __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 > __p1); return __ret; } #else __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; 
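/* Illustrative sketch, not part of the generated header: vcge/vcgt (and the
 * analogous vcle/vclt definitions further below) produce per-lane masks that
 * are often combined with a bitwise select. A hypothetical lane-wise maximum
 * built from them (NEON also provides dedicated vmax intrinsics; this only
 * shows how the masks compose):
 *
 *   static inline float32x4_t example_max_f32(float32x4_t a, float32x4_t b) {
 *     uint32x4_t ge = vcgeq_f32(a, b);   // mask: a[i] >= b[i]
 *     return vbslq_f32(ge, a, b);        // pick a where the mask is set, else b
 *   }
 */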
__rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 > __p1); return __ret; } #else __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 > __p1); return __ret; } #else __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 > __p1); return __ret; } #else __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 > __p1); return __ret; } #else __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 > __p1); return __ret; } #else __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 > __p1); return __ret; } #else __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 > __p1); return __ret; } #else __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 > __p1); return __ret; } #else __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 > __p1); return __ret; } #else __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 > __p1); return __ret; } #else __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 > __p1); return __ret; } #else __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 <= __p1); return __ret; } #else __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 <= __p1); return __ret; } #else __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 <= __p1); return __ret; } #else __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 <= __p1); return __ret; } #else __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 <= __p1); return __ret; } #else __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 <= __p1); return __ret; } #else __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 <= __p1); return __ret; } #else __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 <= __p1); return __ret; } #else __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcle_u32(uint32x2_t 
__p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 <= __p1); return __ret; } #else __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 <= __p1); return __ret; } #else __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 <= __p1); return __ret; } #else __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 <= __p1); return __ret; } #else __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 <= __p1); return __ret; } #else __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 <= __p1); return __ret; } #else __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vclsq_u8(uint8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); return __ret; } #else __ai int8x16_t vclsq_u8(uint8x16_t __p0) { int8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ __ai int32x4_t vclsq_u32(uint32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vclsq_u32(uint32x4_t __p0) { int32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vclsq_u16(uint16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vclsq_u16(uint16x8_t __p0) { int16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vclsq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); return __ret; } #else __ai int8x16_t vclsq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vclsq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vclsq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vclsq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vclsq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vcls_u8(uint8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); return __ret; } #else __ai int8x8_t vcls_u8(uint8x8_t __p0) { int8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vcls_u32(uint32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vcls_u32(uint32x2_t __p0) { int32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vcls_u16(uint16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vcls_u16(uint16x4_t __p0) { int16x4_t __ret; 
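/* Illustrative sketch, not part of the generated header: vcls/vclsq count, for
 * each lane, the number of consecutive bits following the sign bit that match
 * the sign bit (the VCLS instruction). On a little-endian target, for example:
 *
 *   int32x2_t v = vcreate_s32(0x00000001ffffffffULL);  // lanes {-1, 1}
 *   int32x2_t c = vcls_s32(v);                          // lanes {31, 30}
 */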
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vcls_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); return __ret; } #else __ai int8x8_t vcls_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vcls_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vcls_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vcls_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vcls_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 < __p1); return __ret; } #else __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 < __p1); return __ret; } #else __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 < __p1); return __ret; } #else __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 < __p1); return __ret; } #else __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 < __p1); return __ret; } #else __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 < __p1); return __ret; } #else __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 < __p1); return __ret; } #else __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 < __p1); return __ret; } #else __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 < __p1); return __ret; } #else __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t 
vclt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 < __p1); return __ret; } #else __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 < __p1); return __ret; } #else __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vclzq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vclzq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vclzq_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vclzq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vclzq_u16(uint16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vclzq_u16(uint16x8_t __p0) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t 
vclzq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); return __ret; } #else __ai int8x16_t vclzq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vclzq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vclzq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vclzq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vclzq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vclz_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vclz_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vclz_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vclz_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vclz_u16(uint16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vclz_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vclz_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); return __ret; } #else __ai int8x8_t vclz_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vclz_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vclz_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = 
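/* Illustrative sketch, not part of the generated header: vclz/vclzq count the
 * leading zero bits of every lane (a zero lane reports the full lane width).
 * On a little-endian target, for example:
 *
 *   uint32x2_t v = vcreate_u32(0x0000000100000000ULL); // lanes {0, 1}
 *   uint32x2_t z = vclz_u32(v);                        // lanes {32, 31}
 */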
__builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vclz_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vclz_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vcnt_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); return __ret; } #else __ai poly8x8_t vcnt_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vcntq_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); return __ret; } #else __ai poly8x16_t vcntq_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcntq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vcntq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vcntq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); return __ret; } #else __ai int8x16_t vcntq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcnt_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vcnt_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vcnt_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); return __ret; } #else __ai int8x8_t vcnt_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) 
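/* Illustrative sketch, not part of the generated header: vcnt/vcntq compute a
 * per-byte population count (number of set bits in each 8-bit lane) and are
 * defined only for 8-bit element types. For instance:
 *
 *   uint8x8_t v = vdup_n_u8(0xF0);   // every byte has four bits set
 *   uint8x8_t c = vcnt_u8(v);        // every lane is 4
 */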
__builtin_neon_vcnt_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x16_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x8_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #else __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcombine_u16(uint16x4_t __p0, 
uint16x4_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #else __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { float32x4_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { float16x8_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #else __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = 
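/* Illustrative sketch, not part of the generated header: the vcombine_*
 * helpers concatenate two 64-bit (D-register) vectors into one 128-bit
 * (Q-register) vector, with the first argument becoming the low half:
 *
 *   int32x2_t lo = vdup_n_s32(1);
 *   int32x2_t hi = vdup_n_s32(2);
 *   int32x4_t q  = vcombine_s32(lo, hi);   // lanes {1, 1, 2, 2}
 */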
__builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #endif #define vcreate_p8(__p0) __extension__ ({ \ poly8x8_t __ret; \ uint64_t __promote = __p0; \ __ret = (poly8x8_t)(__promote); \ __ret; \ }) #define vcreate_p16(__p0) __extension__ ({ \ poly16x4_t __ret; \ uint64_t __promote = __p0; \ __ret = (poly16x4_t)(__promote); \ __ret; \ }) #define vcreate_u8(__p0) __extension__ ({ \ uint8x8_t __ret; \ uint64_t __promote = __p0; \ __ret = (uint8x8_t)(__promote); \ __ret; \ }) #define vcreate_u32(__p0) __extension__ ({ \ uint32x2_t __ret; \ uint64_t __promote = __p0; \ __ret = (uint32x2_t)(__promote); \ __ret; \ }) #define vcreate_u64(__p0) __extension__ ({ \ uint64x1_t __ret; \ uint64_t __promote = __p0; \ __ret = (uint64x1_t)(__promote); \ __ret; \ }) #define vcreate_u16(__p0) __extension__ ({ \ uint16x4_t __ret; \ uint64_t __promote = __p0; \ __ret = (uint16x4_t)(__promote); \ __ret; \ }) #define vcreate_s8(__p0) __extension__ ({ \ int8x8_t __ret; \ uint64_t __promote = __p0; \ __ret = (int8x8_t)(__promote); \ __ret; \ }) #define vcreate_f32(__p0) __extension__ ({ \ float32x2_t __ret; \ uint64_t __promote = __p0; \ __ret = (float32x2_t)(__promote); \ __ret; \ }) #define vcreate_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ uint64_t __promote = __p0; \ __ret = (float16x4_t)(__promote); \ __ret; \ }) #define vcreate_s32(__p0) __extension__ ({ \ int32x2_t __ret; \ uint64_t __promote = __p0; \ __ret = (int32x2_t)(__promote); \ __ret; \ }) #define vcreate_s64(__p0) __extension__ ({ \ int64x1_t __ret; \ uint64_t __promote = __p0; \ __ret = (int64x1_t)(__promote); \ __ret; \ }) #define vcreate_s16(__p0) __extension__ ({ \ int16x4_t __ret; \ uint64_t __promote = __p0; \ __ret = (int16x4_t)(__promote); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); return __ret; } #else __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { float32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); __ret = 
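/* Illustrative sketch, not part of the generated header: the vcreate_* macros
 * above reinterpret a 64-bit integer as a 64-bit vector; on a little-endian
 * target the least significant byte becomes element 0. For example:
 *
 *   uint8x8_t bytes = vcreate_u8(0x0706050403020100ULL);  // lanes {0, 1, ..., 7}
 */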
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34); return __ret; } #else __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { float32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18); return __ret; } #else __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { float32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2); return __ret; } #else __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { float32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ float32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50); return __ret; } #else 
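/* Usage sketch (illustrative, not part of the generated header): the vcvt*
 * wrappers in this region convert between 32-bit integer and float32 lanes.
 * The _n_ forms treat the integer side as fixed-point, with the immediate
 * giving the number of fractional bits (a compile-time constant in 1..32),
 * and the plain float-to-int forms round toward zero. Hypothetical example:
 *
 *   #include <arm_neon.h>
 *   float32x4_t fixed_to_float(int32x4_t fixed) {
 *     return vcvtq_n_f32_s32(fixed, 8);   // divide each lane by 2^8
 *   }
 *   int32x4_t float_to_int(float32x4_t f) {
 *     return vcvtq_s32_f32(f);            // truncate toward zero
 *   }
 */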
__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \ poly8x8_t __ret_0; \ poly8x8_t __s0_0 = __p0_0; \ __ret_0 = splat_lane_p8(__s0_0, __p1_0); \ __ret_0; \ }) #else #define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \ poly8x8_t __ret_1; \ poly8x8_t __s0_1 = __p0_1; \ poly8x8_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \ __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_1; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \ poly16x4_t __ret_2; \ poly16x4_t __s0_2 = __p0_2; \ __ret_2 = splat_lane_p16(__s0_2, __p1_2); \ __ret_2; \ }) #else #define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \ poly16x4_t __ret_3; \ poly16x4_t __s0_3 = __p0_3; \ poly16x4_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \ __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \ __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \ __ret_3; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \ poly8x16_t __ret_4; \ poly8x8_t __s0_4 = __p0_4; \ __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \ __ret_4; \ }) #else #define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \ poly8x16_t __ret_5; \ poly8x8_t __s0_5 = __p0_5; \ poly8x8_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \ __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_5; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \ poly16x8_t __ret_6; \ poly16x4_t __s0_6 = __p0_6; \ __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \ __ret_6; \ }) #else #define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \ poly16x8_t __ret_7; \ poly16x4_t __s0_7 = __p0_7; \ poly16x4_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \ __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \ __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_7; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \ uint8x16_t __ret_8; \ uint8x8_t __s0_8 = __p0_8; \ __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \ __ret_8; \ }) #else #define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \ uint8x16_t __ret_9; \ uint8x8_t __s0_9 = __p0_9; \ uint8x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \ __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0); \ __ret_9; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \ uint32x4_t __ret_10; \ uint32x2_t __s0_10 = __p0_10; \ __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \ __ret_10; \ }) #else #define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \ uint32x4_t __ret_11; \ uint32x2_t __s0_11 = __p0_11; \ uint32x2_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \ __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \ __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \ __ret_11; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \ uint64x2_t __ret_12; \ uint64x1_t __s0_12 = __p0_12; \ __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \ __ret_12; \ }) #else #define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \ uint64x2_t __ret_13; \ uint64x1_t __s0_13 = __p0_13; \ __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \ __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \ __ret_13; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \ uint16x8_t __ret_14; \ uint16x4_t __s0_14 = __p0_14; \ __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \ __ret_14; \ }) #else #define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \ uint16x8_t __ret_15; \ uint16x4_t __s0_15 = __p0_15; \ uint16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \ __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_15; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \ int8x16_t __ret_16; \ int8x8_t __s0_16 = __p0_16; \ __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \ __ret_16; \ }) #else #define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \ int8x16_t __ret_17; \ int8x8_t __s0_17 = __p0_17; \ int8x8_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \ __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_17; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \ float32x4_t __ret_18; \ float32x2_t __s0_18 = __p0_18; \ __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \ __ret_18; \ }) #else #define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \ float32x4_t __ret_19; \ float32x2_t __s0_19 = __p0_19; \ float32x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \ __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \ __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \ __ret_19; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \ int32x4_t __ret_20; \ int32x2_t __s0_20 = __p0_20; \ __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \ __ret_20; \ }) #else #define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \ int32x4_t __ret_21; \ int32x2_t __s0_21 = __p0_21; \ int32x2_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \ __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \ __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \ __ret_21; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \ int64x2_t __ret_22; \ int64x1_t __s0_22 = __p0_22; \ __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \ 
__ret_22; \ }) #else #define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \ int64x2_t __ret_23; \ int64x1_t __s0_23 = __p0_23; \ __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \ __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \ __ret_23; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \ int16x8_t __ret_24; \ int16x4_t __s0_24 = __p0_24; \ __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \ __ret_24; \ }) #else #define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \ int16x8_t __ret_25; \ int16x4_t __s0_25 = __p0_25; \ int16x4_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \ __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \ __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_25; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \ uint8x8_t __ret_26; \ uint8x8_t __s0_26 = __p0_26; \ __ret_26 = splat_lane_u8(__s0_26, __p1_26); \ __ret_26; \ }) #else #define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \ uint8x8_t __ret_27; \ uint8x8_t __s0_27 = __p0_27; \ uint8x8_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \ __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_27; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \ uint32x2_t __ret_28; \ uint32x2_t __s0_28 = __p0_28; \ __ret_28 = splat_lane_u32(__s0_28, __p1_28); \ __ret_28; \ }) #else #define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \ uint32x2_t __ret_29; \ uint32x2_t __s0_29 = __p0_29; \ uint32x2_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \ __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \ __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \ __ret_29; \ }) #endif #define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \ uint64x1_t __ret_30; \ uint64x1_t __s0_30 = __p0_30; \ __ret_30 = splat_lane_u64(__s0_30, __p1_30); \ __ret_30; \ }) #ifdef __LITTLE_ENDIAN__ #define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \ uint16x4_t __ret_31; \ uint16x4_t __s0_31 = __p0_31; \ __ret_31 = splat_lane_u16(__s0_31, __p1_31); \ __ret_31; \ }) #else #define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \ uint16x4_t __ret_32; \ uint16x4_t __s0_32 = __p0_32; \ uint16x4_t __rev0_32; __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \ __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \ __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \ __ret_32; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \ int8x8_t __ret_33; \ int8x8_t __s0_33 = __p0_33; \ __ret_33 = splat_lane_s8(__s0_33, __p1_33); \ __ret_33; \ }) #else #define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \ int8x8_t __ret_34; \ int8x8_t __s0_34 = __p0_34; \ int8x8_t __rev0_34; __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \ __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_34; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \ float32x2_t __ret_35; \ float32x2_t __s0_35 = __p0_35; \ __ret_35 = splat_lane_f32(__s0_35, __p1_35); \ __ret_35; \ }) #else #define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \ 
float32x2_t __ret_36; \ float32x2_t __s0_36 = __p0_36; \ float32x2_t __rev0_36; __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \ __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \ __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \ __ret_36; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \ int32x2_t __ret_37; \ int32x2_t __s0_37 = __p0_37; \ __ret_37 = splat_lane_s32(__s0_37, __p1_37); \ __ret_37; \ }) #else #define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \ int32x2_t __ret_38; \ int32x2_t __s0_38 = __p0_38; \ int32x2_t __rev0_38; __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \ __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \ __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \ __ret_38; \ }) #endif #define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \ int64x1_t __ret_39; \ int64x1_t __s0_39 = __p0_39; \ __ret_39 = splat_lane_s64(__s0_39, __p1_39); \ __ret_39; \ }) #ifdef __LITTLE_ENDIAN__ #define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \ int16x4_t __ret_40; \ int16x4_t __s0_40 = __p0_40; \ __ret_40 = splat_lane_s16(__s0_40, __p1_40); \ __ret_40; \ }) #else #define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \ int16x4_t __ret_41; \ int16x4_t __s0_41 = __p0_41; \ int16x4_t __rev0_41; __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \ __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \ __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \ __ret_41; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vdup_n_p8(poly8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai poly8x8_t vdup_n_p8(poly8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vdup_n_p16(poly16_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai poly16x4_t vdup_n_p16(poly16_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vdupq_n_p8(poly8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai poly8x16_t vdupq_n_p8(poly8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vdupq_n_p16(poly16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai poly16x8_t vdupq_n_p16(poly16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vdupq_n_u8(uint8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai uint8x16_t vdupq_n_u8(uint8_t __p0) { uint8x16_t __ret; __ret = 
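/* Usage sketch (illustrative, not part of the generated header): the
 * vdup_lane_* / vdupq_lane_* macros above broadcast one lane of a 64-bit
 * source vector across every lane of a 64-bit or 128-bit result; the lane
 * index must be a compile-time constant valid for the source type.
 * Hypothetical example:
 *
 *   #include <arm_neon.h>
 *   int16x8_t broadcast_lane3(int16x4_t v) {
 *     return vdupq_lane_s16(v, 3);   // every lane of the result holds v[3]
 *   }
 */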
(uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vdupq_n_u32(uint32_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai uint32x4_t vdupq_n_u32(uint32_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vdupq_n_u64(uint64_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) {__p0, __p0}; return __ret; } #else __ai uint64x2_t vdupq_n_u64(uint64_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vdupq_n_u16(uint16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai uint16x8_t vdupq_n_u16(uint16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vdupq_n_s8(int8_t __p0) { int8x16_t __ret; __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai int8x16_t vdupq_n_s8(int8_t __p0) { int8x16_t __ret; __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vdupq_n_f32(float32_t __p0) { float32x4_t __ret; __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai float32x4_t vdupq_n_f32(float32_t __p0) { float32x4_t __ret; __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_n_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret; \ }) #else #define vdupq_n_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vdupq_n_s32(int32_t __p0) { int32x4_t __ret; __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai int32x4_t vdupq_n_s32(int32_t __p0) { int32x4_t __ret; __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vdupq_n_s64(int64_t __p0) { int64x2_t __ret; __ret = (int64x2_t) {__p0, __p0}; return __ret; } #else __ai int64x2_t vdupq_n_s64(int64_t __p0) { int64x2_t __ret; __ret = (int64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vdupq_n_s16(int16_t __p0) { int16x8_t __ret; __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai int16x8_t vdupq_n_s16(int16_t __p0) { int16x8_t __ret; __ret = 
(int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vdup_n_u8(uint8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai uint8x8_t vdup_n_u8(uint8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vdup_n_u32(uint32_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) {__p0, __p0}; return __ret; } #else __ai uint32x2_t vdup_n_u32(uint32_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vdup_n_u64(uint64_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vdup_n_u16(uint16_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai uint16x4_t vdup_n_u16(uint16_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vdup_n_s8(int8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai int8x8_t vdup_n_s8(int8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vdup_n_f32(float32_t __p0) { float32x2_t __ret; __ret = (float32x2_t) {__p0, __p0}; return __ret; } #else __ai float32x2_t vdup_n_f32(float32_t __p0) { float32x2_t __ret; __ret = (float32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vdup_n_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret; \ }) #else #define vdup_n_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vdup_n_s32(int32_t __p0) { int32x2_t __ret; __ret = (int32x2_t) {__p0, __p0}; return __ret; } #else __ai int32x2_t vdup_n_s32(int32_t __p0) { int32x2_t __ret; __ret = (int32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vdup_n_s64(int64_t __p0) { int64x1_t __ret; __ret = (int64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vdup_n_s16(int16_t __p0) { int16x4_t __ret; __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai int16x4_t vdup_n_s16(int16_t __p0) { int16x4_t __ret; __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t 
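/* Usage sketch (illustrative, not part of the generated header): vdup_n_* and
 * vdupq_n_* splat a scalar into every lane, built here as compound literals
 * (the float16 forms are provided as macros rather than inline functions).
 * Hypothetical example:
 *
 *   #include <arm_neon.h>
 *   uint8x16_t all_ones(void) {
 *     return vdupq_n_u8(0xFF);   // sixteen lanes, each 0xFF
 *   }
 */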
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = 
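/* Usage sketch (illustrative, not part of the generated header): the veorq_*
 * and veor_* wrappers in this stretch are lane-wise XOR, written with the ^
 * operator on the vector types; the big-endian variants reverse lane order
 * around the operation only to keep lane numbering consistent. Hypothetical
 * example:
 *
 *   #include <arm_neon.h>
 *   uint8x16_t mask_toggle(uint8x16_t data, uint8x16_t mask) {
 *     return veorq_u8(data, mask);   // data ^ mask, lane by lane
 *   }
 */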
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 ^ __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 ^ __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 ^ __p1; return __ret; } #else __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 ^ __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vext_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ __ret = 
(poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ __ret; \ }) #else #define vext_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vext_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ __ret; \ }) #else #define vext_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ __ret; \ }) #else #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ __ret; \ }) #else #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) 
__builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \ __ret; \ }) #else #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4_t __ret; \ 
float32x4_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vext_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ __ret; \ }) #else #define vext_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vext_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else #define 
vext_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vext_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ uint64x1_t __s1 = __p1; \ __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vext_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else #define vext_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vext_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else #define vext_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vext_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \ __ret; \ }) #else #define vext_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vext_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vext_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vext_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ 
int64x1_t __s0 = __p0; \ int64x1_t __s1 = __p1; \ __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vext_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vext_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vget_high_p8(poly8x16_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else __ai poly8x8_t vget_high_p8(poly8x16_t __p0) { poly8x8_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vget_high_p16(poly16x8_t __p0) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else __ai poly16x4_t vget_high_p16(poly16x8_t __p0) { poly16x4_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vget_high_u8(uint8x16_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else __ai uint8x8_t vget_high_u8(uint8x16_t __p0) { uint8x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vget_high_u32(uint32x4_t __p0) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #else __ai uint32x2_t vget_high_u32(uint32x4_t __p0) { uint32x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x1_t vget_high_u64(uint64x2_t __p0) { uint64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else __ai uint64x1_t vget_high_u64(uint64x2_t __p0) { uint64x1_t __ret; uint64x2_t 
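/* Usage sketch (illustrative, not part of the generated header): the vext_* /
 * vextq_* macros above extract a vector from the concatenation of their two
 * operands, starting __p2 lanes into the first, so the result is
 * {a[n], ..., a[last], b[0], ..., b[n-1]}; the index must be a compile-time
 * constant smaller than the lane count. Hypothetical example:
 *
 *   #include <arm_neon.h>
 *   uint8x8_t slide3(uint8x8_t a, uint8x8_t b) {
 *     return vext_u8(a, b, 3);   // {a3,a4,a5,a6,a7,b0,b1,b2}
 *   }
 */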
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vget_high_u16(uint16x8_t __p0) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else __ai uint16x4_t vget_high_u16(uint16x8_t __p0) { uint16x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vget_high_s8(int8x16_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else __ai int8x8_t vget_high_s8(int8x16_t __p0) { int8x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vget_high_f32(float32x4_t __p0) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #else __ai float32x2_t vget_high_f32(float32x4_t __p0) { float32x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vget_high_f16(float16x8_t __p0) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else __ai float16x4_t vget_high_f16(float16x8_t __p0) { float16x4_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vget_high_s32(int32x4_t __p0) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #else __ai int32x2_t vget_high_s32(int32x4_t __p0) { int32x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x1_t vget_high_s64(int64x2_t __p0) { int64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else __ai int64x1_t vget_high_s64(int64x2_t __p0) { int64x1_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = 
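/* Usage sketch (illustrative, not part of the generated header): vget_high_*
 * above returns the upper half of a 128-bit vector as a 64-bit vector, and the
 * vget_lane_* / vgetq_lane_* accessors that follow read one lane out as a
 * scalar (the lane index must be a compile-time constant). Hypothetical
 * example:
 *
 *   #include <arm_neon.h>
 *   uint16_t last_of_high_half(uint16x8_t q) {
 *     uint16x4_t hi = vget_high_u16(q);   // lanes 4-7 of q
 *     return vget_lane_u16(hi, 3);        // architecturally lane 7 of q
 *   }
 */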
__builtin_shufflevector(__rev0, __rev0, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vget_high_s16(int16x8_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else __ai int16x4_t vget_high_s16(int16x8_t __p0) { int16x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x8_t __s0 = __p0; \ __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x8_t __s0 = __p0; \ __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x4_t __s0 = __p0; \ __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x4_t __s0 = __p0; \ __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x16_t __s0 = __p0; \ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x16_t __s0 = __p0; \ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x8_t __s0 = __p0; \ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x8_t __s0 = __p0; \ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x16_t __s0 
= __p0; \ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, 
__p1); \ __ret; \ }) #else #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #endif #define vget_lane_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vget_lane_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #endif #define vget_lane_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vget_lane_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16_t) 
__builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vget_low_p8(poly8x16_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else __ai poly8x8_t vget_low_p8(poly8x16_t __p0) { poly8x8_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vget_low_p16(poly16x8_t __p0) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else __ai poly16x4_t vget_low_p16(poly16x8_t __p0) { poly16x4_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vget_low_u8(uint8x16_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else __ai uint8x8_t vget_low_u8(uint8x16_t __p0) { uint8x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vget_low_u32(uint32x4_t __p0) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1); return __ret; } #else __ai uint32x2_t vget_low_u32(uint32x4_t __p0) { uint32x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x1_t vget_low_u64(uint64x2_t __p0) { uint64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else __ai uint64x1_t vget_low_u64(uint64x2_t __p0) { uint64x1_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vget_low_u16(uint16x8_t __p0) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else __ai uint16x4_t vget_low_u16(uint16x8_t __p0) { uint16x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vget_low_s8(int8x16_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else __ai int8x8_t vget_low_s8(int8x16_t __p0) { 
int8x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vget_low_f32(float32x4_t __p0) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1); return __ret; } #else __ai float32x2_t vget_low_f32(float32x4_t __p0) { float32x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vget_low_f16(float16x8_t __p0) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else __ai float16x4_t vget_low_f16(float16x8_t __p0) { float16x4_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vget_low_s32(int32x4_t __p0) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1); return __ret; } #else __ai int32x2_t vget_low_s32(int32x4_t __p0) { int32x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x1_t vget_low_s64(int64x2_t __p0) { int64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else __ai int64x1_t vget_low_s64(int64x2_t __p0) { int64x1_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vget_low_s16(int16x8_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else __ai int16x4_t vget_low_s16(int16x8_t __p0) { int16x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif 
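/* Each intrinsic that operates on a multi-element vector is emitted in two
 * forms: the __LITTLE_ENDIAN__ definition forwards its operands straight to
 * the corresponding __builtin_neon_* call, while the big-endian definition
 * first reverses the lane order with __builtin_shufflevector, invokes the
 * builtin, and then reverses the result back, so the user-visible lane
 * numbering is the same on either byte order.  Single-element types such as
 * uint64x1_t need no swap and therefore get only one definition.
 *
 * The helper below is a minimal illustrative sketch of how the definitions
 * above compose; the name __neon_example_hadd_lane0 is chosen for this note
 * only and is not part of the ACLE API or of the generated header.
 */
__ai uint8_t __neon_example_hadd_lane0(uint8x8_t __a, uint8x8_t __b) {
  uint8x8_t __h = vhadd_u8(__a, __b); /* per lane: (__a + __b) >> 1, computed without overflow */
  return vget_lane_u8(__h, 0);        /* extract lane 0 as a scalar */
}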
#ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) 
__builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = 
(uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define 
vld1_p8(__p0) __extension__ ({ \ poly8x8_t __ret; \ __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ __ret; \ }) #else #define vld1_p8(__p0) __extension__ ({ \ poly8x8_t __ret; \ __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_p16(__p0) __extension__ ({ \ poly16x4_t __ret; \ __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ __ret; \ }) #else #define vld1_p16(__p0) __extension__ ({ \ poly16x4_t __ret; \ __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_p8(__p0) __extension__ ({ \ poly8x16_t __ret; \ __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ __ret; \ }) #else #define vld1q_p8(__p0) __extension__ ({ \ poly8x16_t __ret; \ __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_p16(__p0) __extension__ ({ \ poly16x8_t __ret; \ __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ __ret; \ }) #else #define vld1q_p16(__p0) __extension__ ({ \ poly16x8_t __ret; \ __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u8(__p0) __extension__ ({ \ uint8x16_t __ret; \ __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ __ret; \ }) #else #define vld1q_u8(__p0) __extension__ ({ \ uint8x16_t __ret; \ __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u32(__p0) __extension__ ({ \ uint32x4_t __ret; \ __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ __ret; \ }) #else #define vld1q_u32(__p0) __extension__ ({ \ uint32x4_t __ret; \ __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u64(__p0) __extension__ ({ \ uint64x2_t __ret; \ __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ __ret; \ }) #else #define vld1q_u64(__p0) __extension__ ({ \ uint64x2_t __ret; \ __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u16(__p0) __extension__ ({ \ uint16x8_t __ret; \ __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ __ret; \ }) #else #define vld1q_u16(__p0) __extension__ ({ \ uint16x8_t __ret; \ __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s8(__p0) __extension__ ({ \ int8x16_t __ret; \ __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ __ret; \ }) #else #define vld1q_s8(__p0) __extension__ ({ \ int8x16_t __ret; \ __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f32(__p0) __extension__ ({ \ float32x4_t __ret; \ __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ __ret; \ }) #else #define vld1q_f32(__p0) 
__extension__ ({ \ float32x4_t __ret; \ __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s32(__p0) __extension__ ({ \ int32x4_t __ret; \ __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ __ret; \ }) #else #define vld1q_s32(__p0) __extension__ ({ \ int32x4_t __ret; \ __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s64(__p0) __extension__ ({ \ int64x2_t __ret; \ __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ __ret; \ }) #else #define vld1q_s64(__p0) __extension__ ({ \ int64x2_t __ret; \ __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s16(__p0) __extension__ ({ \ int16x8_t __ret; \ __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ __ret; \ }) #else #define vld1q_s16(__p0) __extension__ ({ \ int16x8_t __ret; \ __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_u8(__p0) __extension__ ({ \ uint8x8_t __ret; \ __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ __ret; \ }) #else #define vld1_u8(__p0) __extension__ ({ \ uint8x8_t __ret; \ __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_u32(__p0) __extension__ ({ \ uint32x2_t __ret; \ __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ __ret; \ }) #else #define vld1_u32(__p0) __extension__ ({ \ uint32x2_t __ret; \ __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_u64(__p0) __extension__ ({ \ uint64x1_t __ret; \ __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_u16(__p0) __extension__ ({ \ uint16x4_t __ret; \ __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ __ret; \ }) #else #define vld1_u16(__p0) __extension__ ({ \ uint16x4_t __ret; \ __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_s8(__p0) __extension__ ({ \ int8x8_t __ret; \ __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ __ret; \ }) #else #define vld1_s8(__p0) __extension__ ({ \ int8x8_t __ret; \ __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_f32(__p0) __extension__ ({ \ float32x2_t __ret; \ __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ __ret; \ }) #else #define vld1_f32(__p0) __extension__ ({ \ float32x2_t __ret; \ __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_s32(__p0) __extension__ ({ \ int32x2_t __ret; \ __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ __ret; \ }) #else #define vld1_s32(__p0) __extension__ ({ \ int32x2_t __ret; \ __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_s64(__p0) 
__extension__ ({ \ int64x1_t __ret; \ __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_s16(__p0) __extension__ ({ \ int16x4_t __ret; \ __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ __ret; \ }) #else #define vld1_s16(__p0) __extension__ ({ \ int16x4_t __ret; \ __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_p8(__p0) __extension__ ({ \ poly8x8_t __ret; \ __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ __ret; \ }) #else #define vld1_dup_p8(__p0) __extension__ ({ \ poly8x8_t __ret; \ __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_p16(__p0) __extension__ ({ \ poly16x4_t __ret; \ __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ __ret; \ }) #else #define vld1_dup_p16(__p0) __extension__ ({ \ poly16x4_t __ret; \ __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_p8(__p0) __extension__ ({ \ poly8x16_t __ret; \ __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ __ret; \ }) #else #define vld1q_dup_p8(__p0) __extension__ ({ \ poly8x16_t __ret; \ __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_p16(__p0) __extension__ ({ \ poly16x8_t __ret; \ __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ __ret; \ }) #else #define vld1q_dup_p16(__p0) __extension__ ({ \ poly16x8_t __ret; \ __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_u8(__p0) __extension__ ({ \ uint8x16_t __ret; \ __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ __ret; \ }) #else #define vld1q_dup_u8(__p0) __extension__ ({ \ uint8x16_t __ret; \ __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_u32(__p0) __extension__ ({ \ uint32x4_t __ret; \ __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ __ret; \ }) #else #define vld1q_dup_u32(__p0) __extension__ ({ \ uint32x4_t __ret; \ __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_u64(__p0) __extension__ ({ \ uint64x2_t __ret; \ __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ __ret; \ }) #else #define vld1q_dup_u64(__p0) __extension__ ({ \ uint64x2_t __ret; \ __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_u16(__p0) __extension__ ({ \ uint16x8_t __ret; \ __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ __ret; \ }) #else #define vld1q_dup_u16(__p0) __extension__ ({ \ uint16x8_t __ret; \ __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 
1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_s8(__p0) __extension__ ({ \ int8x16_t __ret; \ __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ __ret; \ }) #else #define vld1q_dup_s8(__p0) __extension__ ({ \ int8x16_t __ret; \ __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_f32(__p0) __extension__ ({ \ float32x4_t __ret; \ __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ __ret; \ }) #else #define vld1q_dup_f32(__p0) __extension__ ({ \ float32x4_t __ret; \ __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_s32(__p0) __extension__ ({ \ int32x4_t __ret; \ __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ __ret; \ }) #else #define vld1q_dup_s32(__p0) __extension__ ({ \ int32x4_t __ret; \ __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_s64(__p0) __extension__ ({ \ int64x2_t __ret; \ __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ __ret; \ }) #else #define vld1q_dup_s64(__p0) __extension__ ({ \ int64x2_t __ret; \ __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_s16(__p0) __extension__ ({ \ int16x8_t __ret; \ __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ __ret; \ }) #else #define vld1q_dup_s16(__p0) __extension__ ({ \ int16x8_t __ret; \ __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_u8(__p0) __extension__ ({ \ uint8x8_t __ret; \ __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ __ret; \ }) #else #define vld1_dup_u8(__p0) __extension__ ({ \ uint8x8_t __ret; \ __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_u32(__p0) __extension__ ({ \ uint32x2_t __ret; \ __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ __ret; \ }) #else #define vld1_dup_u32(__p0) __extension__ ({ \ uint32x2_t __ret; \ __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_dup_u64(__p0) __extension__ ({ \ uint64x1_t __ret; \ __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_dup_u16(__p0) __extension__ ({ \ uint16x4_t __ret; \ __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ __ret; \ }) #else #define vld1_dup_u16(__p0) __extension__ ({ \ uint16x4_t __ret; \ __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_s8(__p0) __extension__ ({ \ int8x8_t __ret; \ __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ __ret; \ }) #else #define vld1_dup_s8(__p0) __extension__ ({ \ int8x8_t __ret; \ __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 
5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_f32(__p0) __extension__ ({ \ float32x2_t __ret; \ __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ __ret; \ }) #else #define vld1_dup_f32(__p0) __extension__ ({ \ float32x2_t __ret; \ __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_s32(__p0) __extension__ ({ \ int32x2_t __ret; \ __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ __ret; \ }) #else #define vld1_dup_s32(__p0) __extension__ ({ \ int32x2_t __ret; \ __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_dup_s64(__p0) __extension__ ({ \ int64x1_t __ret; \ __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_dup_s16(__p0) __extension__ ({ \ int16x4_t __ret; \ __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ __ret; \ }) #else #define vld1_dup_s16(__p0) __extension__ ({ \ int16x4_t __ret; \ __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s1 = __p1; \ __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ __ret; \ }) #else #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s1 = __p1; \ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s1 = __p1; \ __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ __ret; \ }) #else #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s1 = __p1; \ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s1 = __p1; \ __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ __ret; \ }) #else #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s1 = __p1; \ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s1 = __p1; \ __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ __ret; \ }) #else #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s1 = __p1; \ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret 
= (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s1 = __p1; \ __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s1 = __p1; \ __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s1 = __p1; \ __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s1 = __p1; \ __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s1 = __p1; \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s1 = __p1; \ __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ __ret; \ }) #else #define vld1q_lane_f32(__p0, __p1, __p2) 
__extension__ ({ \ float32x4_t __ret; \ float32x4_t __s1 = __p1; \ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s1 = __p1; \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s1 = __p1; \ __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s1 = __p1; \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s1 = __p1; \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s1 = __p1; \ __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ __ret; \ }) #else #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s1 = __p1; \ __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s1 = __p1; \ __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s1 = __p1; 
\ __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s1 = __p1; \ __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s1 = __p1; \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s1 = __p1; \ __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ __ret; \ }) #else #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s1 = __p1; \ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s1 = __p1; \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s1 = __p1; \ __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s1 = __p1; \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_p8_x2(__p0) __extension__ ({ \ poly8x8x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld1_p8_x2(__p0) __extension__ ({ \ poly8x8x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_p16_x2(__p0) __extension__ ({ \ poly16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ __ret; \ 
}) #else #define vld1_p16_x2(__p0) __extension__ ({ \ poly16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_p8_x2(__p0) __extension__ ({ \ poly8x16x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld1q_p8_x2(__p0) __extension__ ({ \ poly8x16x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_p16_x2(__p0) __extension__ ({ \ poly16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld1q_p16_x2(__p0) __extension__ ({ \ poly16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u8_x2(__p0) __extension__ ({ \ uint8x16x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld1q_u8_x2(__p0) __extension__ ({ \ uint8x16x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u32_x2(__p0) __extension__ ({ \ uint32x4x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld1q_u32_x2(__p0) __extension__ ({ \ uint32x4x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u64_x2(__p0) __extension__ ({ \ uint64x2x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ __ret; \ }) #else #define vld1q_u64_x2(__p0) __extension__ ({ \ uint64x2x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u16_x2(__p0) __extension__ ({ \ uint16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld1q_u16_x2(__p0) __extension__ ({ \ uint16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s8_x2(__p0) __extension__ ({ \ int8x16x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld1q_s8_x2(__p0) __extension__ ({ \ int8x16x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ \ __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f32_x2(__p0) __extension__ ({ \ float32x4x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld1q_f32_x2(__p0) __extension__ ({ \ float32x4x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s32_x2(__p0) __extension__ ({ \ int32x4x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld1q_s32_x2(__p0) __extension__ ({ \ int32x4x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s64_x2(__p0) __extension__ ({ \ int64x2x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld1q_s64_x2(__p0) __extension__ ({ \ int64x2x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s16_x2(__p0) __extension__ ({ \ int16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld1q_s16_x2(__p0) __extension__ ({ \ int16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_u8_x2(__p0) __extension__ ({ \ uint8x8x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld1_u8_x2(__p0) __extension__ ({ \ uint8x8x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_u32_x2(__p0) __extension__ ({ \ uint32x2x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld1_u32_x2(__p0) __extension__ ({ \ uint32x2x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld1_u64_x2(__p0) __extension__ ({ \ uint64x1x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_u16_x2(__p0) __extension__ ({ \ uint16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld1_u16_x2(__p0) __extension__ ({ \ uint16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 
3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_s8_x2(__p0) __extension__ ({ \ int8x8x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld1_s8_x2(__p0) __extension__ ({ \ int8x8x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_f32_x2(__p0) __extension__ ({ \ float32x2x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld1_f32_x2(__p0) __extension__ ({ \ float32x2x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_s32_x2(__p0) __extension__ ({ \ int32x2x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld1_s32_x2(__p0) __extension__ ({ \ int32x2x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld1_s64_x2(__p0) __extension__ ({ \ int64x1x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_s16_x2(__p0) __extension__ ({ \ int16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld1_s16_x2(__p0) __extension__ ({ \ int16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_p8_x3(__p0) __extension__ ({ \ poly8x8x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld1_p8_x3(__p0) __extension__ ({ \ poly8x8x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_p16_x3(__p0) __extension__ ({ \ poly16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ __ret; \ }) #else #define vld1_p16_x3(__p0) __extension__ ({ \ poly16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_p8_x3(__p0) __extension__ ({ \ poly8x16x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld1q_p8_x3(__p0) __extension__ ({ \ poly8x16x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_p16_x3(__p0) __extension__ ({ \ poly16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld1q_p16_x3(__p0) __extension__ ({ \ poly16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u8_x3(__p0) __extension__ ({ \ uint8x16x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld1q_u8_x3(__p0) __extension__ ({ \ uint8x16x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u32_x3(__p0) __extension__ ({ \ uint32x4x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld1q_u32_x3(__p0) __extension__ ({ \ uint32x4x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u64_x3(__p0) __extension__ ({ \ uint64x2x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ __ret; \ }) #else #define vld1q_u64_x3(__p0) __extension__ ({ \ uint64x2x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u16_x3(__p0) __extension__ ({ \ uint16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld1q_u16_x3(__p0) __extension__ ({ \ uint16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s8_x3(__p0) __extension__ ({ \ int8x16x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld1q_s8_x3(__p0) __extension__ ({ \ int8x16x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f32_x3(__p0) __extension__ ({ \ float32x4x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld1q_f32_x3(__p0) __extension__ ({ \ float32x4x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s32_x3(__p0) __extension__ ({ \ int32x4x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld1q_s32_x3(__p0) __extension__ ({ \ int32x4x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s64_x3(__p0) __extension__ ({ \ int64x2x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld1q_s64_x3(__p0) __extension__ ({ \ int64x2x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s16_x3(__p0) __extension__ ({ \ int16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld1q_s16_x3(__p0) __extension__ ({ \ int16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_u8_x3(__p0) __extension__ ({ \ uint8x8x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld1_u8_x3(__p0) __extension__ ({ \ uint8x8x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_u32_x3(__p0) __extension__ ({ \ uint32x2x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld1_u32_x3(__p0) __extension__ ({ \ uint32x2x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld1_u64_x3(__p0) __extension__ ({ \ uint64x1x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef 
__LITTLE_ENDIAN__ #define vld1_u16_x3(__p0) __extension__ ({ \ uint16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld1_u16_x3(__p0) __extension__ ({ \ uint16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_s8_x3(__p0) __extension__ ({ \ int8x8x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld1_s8_x3(__p0) __extension__ ({ \ int8x8x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_f32_x3(__p0) __extension__ ({ \ float32x2x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld1_f32_x3(__p0) __extension__ ({ \ float32x2x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_s32_x3(__p0) __extension__ ({ \ int32x2x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld1_s32_x3(__p0) __extension__ ({ \ int32x2x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld1_s64_x3(__p0) __extension__ ({ \ int64x1x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_s16_x3(__p0) __extension__ ({ \ int16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld1_s16_x3(__p0) __extension__ ({ \ int16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_p8_x4(__p0) __extension__ ({ \ poly8x8x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld1_p8_x4(__p0) __extension__ ({ \ poly8x8x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_p16_x4(__p0) __extension__ ({ \ poly16x4x4_t __ret; \ 
__builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ __ret; \ }) #else #define vld1_p16_x4(__p0) __extension__ ({ \ poly16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_p8_x4(__p0) __extension__ ({ \ poly8x16x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld1q_p8_x4(__p0) __extension__ ({ \ poly8x16x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_p16_x4(__p0) __extension__ ({ \ poly16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld1q_p16_x4(__p0) __extension__ ({ \ poly16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u8_x4(__p0) __extension__ ({ \ uint8x16x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld1q_u8_x4(__p0) __extension__ ({ \ uint8x16x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u32_x4(__p0) __extension__ ({ \ uint32x4x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld1q_u32_x4(__p0) __extension__ ({ \ uint32x4x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u64_x4(__p0) __extension__ ({ \ uint64x2x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ __ret; \ }) #else 
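/* Usage sketch (illustrative; buf, blk and two are hypothetical names, not part
 * of this header).  The vld1*_xN forms load N consecutive vectors from memory
 * in element order, without de-interleaving:
 *
 *   uint8_t buf[64];                      // 64 readable bytes
 *   uint8x16x4_t blk = vld1q_u8_x4(buf);  // blk.val[0..3] hold bytes 0..63 in order
 *
 * By contrast, the vld2* forms defined further below de-interleave the element
 * stream across the .val[] members:
 *
 *   uint8x16x2_t two = vld2q_u8(buf);     // two.val[0] = bytes 0,2,4,...; two.val[1] = bytes 1,3,5,...
 */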
#define vld1q_u64_x4(__p0) __extension__ ({ \ uint64x2x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_u16_x4(__p0) __extension__ ({ \ uint16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld1q_u16_x4(__p0) __extension__ ({ \ uint16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s8_x4(__p0) __extension__ ({ \ int8x16x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld1q_s8_x4(__p0) __extension__ ({ \ int8x16x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f32_x4(__p0) __extension__ ({ \ float32x4x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld1q_f32_x4(__p0) __extension__ ({ \ float32x4x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s32_x4(__p0) __extension__ ({ \ int32x4x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld1q_s32_x4(__p0) __extension__ ({ \ int32x4x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s64_x4(__p0) __extension__ ({ \ int64x2x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld1q_s64_x4(__p0) __extension__ ({ \ int64x2x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_s16_x4(__p0) __extension__ ({ \ int16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld1q_s16_x4(__p0) __extension__ ({ \ int16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_u8_x4(__p0) __extension__ ({ \ uint8x8x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld1_u8_x4(__p0) __extension__ ({ \ uint8x8x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_u32_x4(__p0) __extension__ ({ \ uint32x2x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld1_u32_x4(__p0) __extension__ ({ \ uint32x2x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld1_u64_x4(__p0) __extension__ ({ \ uint64x1x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_u16_x4(__p0) __extension__ ({ \ uint16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld1_u16_x4(__p0) __extension__ ({ \ uint16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_s8_x4(__p0) __extension__ ({ \ int8x8x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld1_s8_x4(__p0) __extension__ ({ \ int8x8x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define 
vld1_f32_x4(__p0) __extension__ ({ \ float32x2x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld1_f32_x4(__p0) __extension__ ({ \ float32x2x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_s32_x4(__p0) __extension__ ({ \ int32x2x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld1_s32_x4(__p0) __extension__ ({ \ int32x2x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld1_s64_x4(__p0) __extension__ ({ \ int64x1x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1_s16_x4(__p0) __extension__ ({ \ int16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld1_s16_x4(__p0) __extension__ ({ \ int16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_p8(__p0) __extension__ ({ \ poly8x8x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld2_p8(__p0) __extension__ ({ \ poly8x8x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_p16(__p0) __extension__ ({ \ poly16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 5); \ __ret; \ }) #else #define vld2_p16(__p0) __extension__ ({ \ poly16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_p8(__p0) __extension__ ({ \ poly8x16x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld2q_p8(__p0) __extension__ ({ \ poly8x16x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_p16(__p0) __extension__ ({ \ poly16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld2q_p16(__p0) __extension__ ({ \ poly16x8x2_t __ret; \ 
__builtin_neon_vld2q_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_u8(__p0) __extension__ ({ \ uint8x16x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld2q_u8(__p0) __extension__ ({ \ uint8x16x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_u32(__p0) __extension__ ({ \ uint32x4x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld2q_u32(__p0) __extension__ ({ \ uint32x4x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_u16(__p0) __extension__ ({ \ uint16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld2q_u16(__p0) __extension__ ({ \ uint16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_s8(__p0) __extension__ ({ \ int8x16x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld2q_s8(__p0) __extension__ ({ \ int8x16x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_f32(__p0) __extension__ ({ \ float32x4x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld2q_f32(__p0) __extension__ ({ \ float32x4x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_s32(__p0) __extension__ ({ \ int32x4x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld2q_s32(__p0) __extension__ ({ \ int32x4x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_s16(__p0) __extension__ ({ \ int16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld2q_s16(__p0) __extension__ ({ \ int16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 
1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_u8(__p0) __extension__ ({ \ uint8x8x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld2_u8(__p0) __extension__ ({ \ uint8x8x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_u32(__p0) __extension__ ({ \ uint32x2x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld2_u32(__p0) __extension__ ({ \ uint32x2x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld2_u64(__p0) __extension__ ({ \ uint64x1x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld2_u16(__p0) __extension__ ({ \ uint16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld2_u16(__p0) __extension__ ({ \ uint16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_s8(__p0) __extension__ ({ \ int8x8x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld2_s8(__p0) __extension__ ({ \ int8x8x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_f32(__p0) __extension__ ({ \ float32x2x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld2_f32(__p0) __extension__ ({ \ float32x2x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_s32(__p0) __extension__ ({ \ int32x2x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld2_s32(__p0) __extension__ ({ \ int32x2x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld2_s64(__p0) __extension__ ({ \ int64x1x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld2_s16(__p0) __extension__ ({ \ int16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld2_s16(__p0) __extension__ ({ \ int16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_p8(__p0) __extension__ ({ \ poly8x8x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld2_dup_p8(__p0) __extension__ ({ \ 
poly8x8x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_p16(__p0) __extension__ ({ \ poly16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ __ret; \ }) #else #define vld2_dup_p16(__p0) __extension__ ({ \ poly16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_p8(__p0) __extension__ ({ \ poly8x16x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld2q_dup_p8(__p0) __extension__ ({ \ poly8x16x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_p16(__p0) __extension__ ({ \ poly16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld2q_dup_p16(__p0) __extension__ ({ \ poly16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_u8(__p0) __extension__ ({ \ uint8x16x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld2q_dup_u8(__p0) __extension__ ({ \ uint8x16x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_u32(__p0) __extension__ ({ \ uint32x4x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld2q_dup_u32(__p0) __extension__ ({ \ uint32x4x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_u64(__p0) __extension__ ({ \ uint64x2x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ __ret; \ }) #else #define vld2q_dup_u64(__p0) __extension__ ({ \ uint64x2x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_u16(__p0) __extension__ ({ \ uint16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld2q_dup_u16(__p0) __extension__ ({ \ uint16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_s8(__p0) __extension__ ({ \ int8x16x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld2q_dup_s8(__p0) __extension__ ({ \ int8x16x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_f32(__p0) __extension__ ({ \ float32x4x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld2q_dup_f32(__p0) __extension__ ({ \ float32x4x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_s32(__p0) __extension__ ({ \ int32x4x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld2q_dup_s32(__p0) __extension__ ({ \ int32x4x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_s64(__p0) __extension__ ({ \ int64x2x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld2q_dup_s64(__p0) __extension__ ({ \ int64x2x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_s16(__p0) __extension__ ({ \ int16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld2q_dup_s16(__p0) __extension__ ({ \ int16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_u8(__p0) __extension__ ({ \ uint8x8x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld2_dup_u8(__p0) __extension__ ({ \ uint8x8x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_u32(__p0) __extension__ ({ \ uint32x2x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld2_dup_u32(__p0) __extension__ ({ \ uint32x2x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld2_dup_u64(__p0) __extension__ ({ \ uint64x1x2_t __ret; \ 
__builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld2_dup_u16(__p0) __extension__ ({ \ uint16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld2_dup_u16(__p0) __extension__ ({ \ uint16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_s8(__p0) __extension__ ({ \ int8x8x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld2_dup_s8(__p0) __extension__ ({ \ int8x8x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_f32(__p0) __extension__ ({ \ float32x2x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld2_dup_f32(__p0) __extension__ ({ \ float32x2x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_s32(__p0) __extension__ ({ \ int32x2x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld2_dup_s32(__p0) __extension__ ({ \ int32x2x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld2_dup_s64(__p0) __extension__ ({ \ int64x1x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld2_dup_s16(__p0) __extension__ ({ \ int16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld2_dup_s16(__p0) __extension__ ({ \ int16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x2_t __ret; \ poly8x8x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ __ret; \ }) #else #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x2_t __ret; \ poly8x8x2_t __s1 = __p1; \ poly8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x2_t __ret; \ poly16x4x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 
__p2, 5); \ __ret; \ }) #else #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x2_t __ret; \ poly16x4x2_t __s1 = __p1; \ poly16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x2_t __ret; \ poly16x8x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ __ret; \ }) #else #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x2_t __ret; \ poly16x8x2_t __s1 = __p1; \ poly16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x2_t __ret; \ uint32x4x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ __ret; \ }) #else #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x2_t __ret; \ uint32x4x2_t __s1 = __p1; \ uint32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x2_t __ret; \ uint16x8x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ __ret; \ }) #else #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x2_t __ret; \ uint16x8x2_t __s1 = __p1; \ uint16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x2_t __ret; \ float32x4x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ __ret; \ }) #else #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ 
float32x4x2_t __ret; \ float32x4x2_t __s1 = __p1; \ float32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x2_t __ret; \ int32x4x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ __ret; \ }) #else #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x2_t __ret; \ int32x4x2_t __s1 = __p1; \ int32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x2_t __ret; \ int16x8x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ __ret; \ }) #else #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x2_t __ret; \ int16x8x2_t __s1 = __p1; \ int16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x2_t __ret; \ uint8x8x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ __ret; \ }) #else #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x2_t __ret; \ uint8x8x2_t __s1 = __p1; \ uint8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x2_t __ret; \ uint32x2x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ __ret; \ }) #else #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x2_t __ret; \ uint32x2x2_t __s1 = __p1; \ uint32x2x2_t __rev1; \ __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x2_t __ret; \ uint16x4x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ __ret; \ }) #else #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x2_t __ret; \ uint16x4x2_t __s1 = __p1; \ uint16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x2_t __ret; \ int8x8x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ __ret; \ }) #else #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x2_t __ret; \ int8x8x2_t __s1 = __p1; \ int8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x2_t __ret; \ float32x2x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ __ret; \ }) #else #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x2_t __ret; \ float32x2x2_t __s1 = __p1; \ float32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x2_t __ret; \ int32x2x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ __ret; \ }) #else #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x2_t __ret; \ int32x2x2_t __s1 = __p1; \ int32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 
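/*
 * vld2_lane_<t>(ptr, src, lane) and vld2q_lane_<t>() load one interleaved
 * pair of elements from ptr into lane 'lane' of src.val[0] and src.val[1];
 * all other lanes are carried over from src unchanged, and the updated pair
 * of vectors is returned. 'lane' must be a compile-time constant in range
 * for the vector type. Sketch (pointer and offsets are examples only):
 *
 *   const uint16_t *p = ...;             // interleaved {a0, b0, a1, b1, ...}
 *   uint16x4x2_t ab = vld2_u16(p);       // de-interleave the first 8 elements
 *   ab = vld2_lane_u16(p + 8, ab, 0);    // refill lane 0 from p[8], p[9]
 */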
2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x2_t __ret; \ int16x4x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ __ret; \ }) #else #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x2_t __ret; \ int16x4x2_t __s1 = __p1; \ int16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_p8(__p0) __extension__ ({ \ poly8x8x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld3_p8(__p0) __extension__ ({ \ poly8x8x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_p16(__p0) __extension__ ({ \ poly16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 5); \ __ret; \ }) #else #define vld3_p16(__p0) __extension__ ({ \ poly16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_p8(__p0) __extension__ ({ \ poly8x16x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld3q_p8(__p0) __extension__ ({ \ poly8x16x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_p16(__p0) __extension__ ({ \ poly16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld3q_p16(__p0) __extension__ ({ \ poly16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_u8(__p0) __extension__ ({ \ uint8x16x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld3q_u8(__p0) __extension__ ({ \ uint8x16x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 
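/*
 * The vld3_<t>/vld3q_<t> macros beginning just above and continuing below
 * load three vectors' worth of data from consecutive memory and
 * de-interleave it: element 3*i lands in val[0], element 3*i+1 in val[1],
 * and element 3*i+2 in val[2]. The one-lane 64-bit forms (vld3_u64,
 * vld3_s64) have a single definition because a one-element vector has no
 * lane order to reverse. A common use is splitting packed RGB data
 * (identifiers are illustrative):
 *
 *   const uint8_t *rgb = ...;            // 24 bytes: R0, G0, B0, R1, G1, B1, ...
 *   uint8x8x3_t px = vld3_u8(rgb);
 *   uint8x8_t red = px.val[0], green = px.val[1], blue = px.val[2];
 */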
48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_u32(__p0) __extension__ ({ \ uint32x4x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld3q_u32(__p0) __extension__ ({ \ uint32x4x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_u16(__p0) __extension__ ({ \ uint16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld3q_u16(__p0) __extension__ ({ \ uint16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_s8(__p0) __extension__ ({ \ int8x16x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld3q_s8(__p0) __extension__ ({ \ int8x16x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_f32(__p0) __extension__ ({ \ float32x4x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld3q_f32(__p0) __extension__ ({ \ float32x4x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_s32(__p0) __extension__ ({ \ int32x4x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld3q_s32(__p0) __extension__ ({ \ int32x4x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_s16(__p0) __extension__ ({ \ int16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld3q_s16(__p0) __extension__ ({ \ int16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_u8(__p0) __extension__ ({ \ uint8x8x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld3_u8(__p0) __extension__ ({ \ uint8x8x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_u32(__p0) __extension__ ({ \ uint32x2x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld3_u32(__p0) __extension__ ({ \ uint32x2x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld3_u64(__p0) __extension__ ({ \ uint64x1x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld3_u16(__p0) __extension__ ({ \ uint16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld3_u16(__p0) __extension__ ({ \ uint16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_s8(__p0) __extension__ ({ \ int8x8x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld3_s8(__p0) __extension__ ({ \ int8x8x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_f32(__p0) __extension__ ({ \ float32x2x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld3_f32(__p0) __extension__ ({ \ float32x2x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_s32(__p0) __extension__ ({ \ int32x2x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld3_s32(__p0) __extension__ ({ \ int32x2x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld3_s64(__p0) __extension__ ({ \ int64x1x3_t __ret; \ 
__builtin_neon_vld3_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld3_s16(__p0) __extension__ ({ \ int16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld3_s16(__p0) __extension__ ({ \ int16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_p8(__p0) __extension__ ({ \ poly8x8x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld3_dup_p8(__p0) __extension__ ({ \ poly8x8x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_p16(__p0) __extension__ ({ \ poly16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ __ret; \ }) #else #define vld3_dup_p16(__p0) __extension__ ({ \ poly16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_p8(__p0) __extension__ ({ \ poly8x16x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld3q_dup_p8(__p0) __extension__ ({ \ poly8x16x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_p16(__p0) __extension__ ({ \ poly16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld3q_dup_p16(__p0) __extension__ ({ \ poly16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_u8(__p0) __extension__ ({ \ uint8x16x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld3q_dup_u8(__p0) __extension__ ({ \ uint8x16x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 
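/*
 * The vld3_dup_<t>/vld3q_dup_<t> macros in this region load a single
 * 3-element structure from memory and broadcast it: every lane of val[0]
 * receives ptr[0], every lane of val[1] receives ptr[1], and every lane of
 * val[2] receives ptr[2]. Sketch (values are examples only):
 *
 *   static const uint8_t k[3] = {1, 2, 3};
 *   uint8x8x3_t c = vld3_dup_u8(k);      // c.val[0] all 1s, c.val[1] all 2s, c.val[2] all 3s
 */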
6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_u32(__p0) __extension__ ({ \ uint32x4x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld3q_dup_u32(__p0) __extension__ ({ \ uint32x4x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_u64(__p0) __extension__ ({ \ uint64x2x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ __ret; \ }) #else #define vld3q_dup_u64(__p0) __extension__ ({ \ uint64x2x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_u16(__p0) __extension__ ({ \ uint16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld3q_dup_u16(__p0) __extension__ ({ \ uint16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_s8(__p0) __extension__ ({ \ int8x16x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld3q_dup_s8(__p0) __extension__ ({ \ int8x16x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_f32(__p0) __extension__ ({ \ float32x4x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld3q_dup_f32(__p0) __extension__ ({ \ float32x4x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_s32(__p0) __extension__ ({ \ int32x4x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld3q_dup_s32(__p0) __extension__ ({ \ int32x4x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_s64(__p0) __extension__ ({ \ int64x2x3_t __ret; \ 
__builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld3q_dup_s64(__p0) __extension__ ({ \ int64x2x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_s16(__p0) __extension__ ({ \ int16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld3q_dup_s16(__p0) __extension__ ({ \ int16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_u8(__p0) __extension__ ({ \ uint8x8x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld3_dup_u8(__p0) __extension__ ({ \ uint8x8x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_u32(__p0) __extension__ ({ \ uint32x2x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld3_dup_u32(__p0) __extension__ ({ \ uint32x2x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld3_dup_u64(__p0) __extension__ ({ \ uint64x1x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld3_dup_u16(__p0) __extension__ ({ \ uint16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld3_dup_u16(__p0) __extension__ ({ \ uint16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_s8(__p0) __extension__ ({ \ int8x8x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld3_dup_s8(__p0) __extension__ ({ \ int8x8x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_f32(__p0) __extension__ ({ \ float32x2x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld3_dup_f32(__p0) __extension__ ({ \ 
float32x2x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_s32(__p0) __extension__ ({ \ int32x2x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld3_dup_s32(__p0) __extension__ ({ \ int32x2x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld3_dup_s64(__p0) __extension__ ({ \ int64x1x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld3_dup_s16(__p0) __extension__ ({ \ int16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld3_dup_s16(__p0) __extension__ ({ \ int16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x3_t __ret; \ poly8x8x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ __ret; \ }) #else #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x3_t __ret; \ poly8x8x3_t __s1 = __p1; \ poly8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x3_t __ret; \ poly16x4x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ __ret; \ }) #else #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x3_t __ret; \ poly16x4x3_t __s1 = __p1; \ poly16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x3_t __ret; \ poly16x8x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ __ret; \ }) #else #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x3_t __ret; \ poly16x8x3_t __s1 = __p1; \ poly16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x3_t __ret; \ uint32x4x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ __ret; \ }) #else #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x3_t __ret; \ uint32x4x3_t __s1 = __p1; \ uint32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x3_t __ret; \ uint16x8x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ __ret; \ }) #else #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x3_t __ret; \ uint16x8x3_t __s1 = __p1; \ uint16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x3_t __ret; \ float32x4x3_t __s1 = __p1; \ 
__builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ __ret; \ }) #else #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x3_t __ret; \ float32x4x3_t __s1 = __p1; \ float32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x3_t __ret; \ int32x4x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ __ret; \ }) #else #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x3_t __ret; \ int32x4x3_t __s1 = __p1; \ int32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x3_t __ret; \ int16x8x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ __ret; \ }) #else #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x3_t __ret; \ int16x8x3_t __s1 = __p1; \ int16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x3_t __ret; \ uint8x8x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ __ret; \ }) #else #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x3_t __ret; \ uint8x8x3_t __s1 = __p1; \ uint8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
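/*
 * The vld3_lane_<t>/vld3q_lane_<t> macros around this point follow the same
 * pattern as vld2_lane: one 3-element structure is loaded from ptr into the
 * given lane of src.val[0], src.val[1] and src.val[2], the remaining lanes
 * are copied from src, and the updated triple is returned. Sketch
 * (identifiers are illustrative):
 *
 *   int16x4x3_t t = vld3_s16(base);      // de-interleave 4 structures
 *   t = vld3_lane_s16(base + 12, t, 3);  // rewrite lane 3 from the next structure
 */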
__s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x3_t __ret; \ uint32x2x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ __ret; \ }) #else #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x3_t __ret; \ uint32x2x3_t __s1 = __p1; \ uint32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x3_t __ret; \ uint16x4x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ __ret; \ }) #else #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x3_t __ret; \ uint16x4x3_t __s1 = __p1; \ uint16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x3_t __ret; \ int8x8x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ __ret; \ }) #else #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x3_t __ret; \ int8x8x3_t __s1 = __p1; \ int8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ \ __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x3_t __ret; \ float32x2x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ __ret; \ }) #else #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x3_t __ret; \ float32x2x3_t __s1 = __p1; \ float32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x3_t __ret; \ int32x2x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ __ret; \ }) #else #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x3_t __ret; \ int32x2x3_t __s1 = __p1; \ int32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x3_t __ret; \ int16x4x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ __ret; \ }) #else #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x3_t __ret; \ int16x4x3_t __s1 = __p1; \ int16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_p8(__p0) __extension__ ({ \ poly8x8x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld4_p8(__p0) __extension__ ({ \ poly8x8x4_t __ret; \ 
__builtin_neon_vld4_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_p16(__p0) __extension__ ({ \ poly16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 5); \ __ret; \ }) #else #define vld4_p16(__p0) __extension__ ({ \ poly16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_p8(__p0) __extension__ ({ \ poly8x16x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld4q_p8(__p0) __extension__ ({ \ poly8x16x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_p16(__p0) __extension__ ({ \ poly16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld4q_p16(__p0) __extension__ ({ \ poly16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_u8(__p0) __extension__ ({ \ uint8x16x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld4q_u8(__p0) __extension__ ({ \ uint8x16x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_u32(__p0) __extension__ ({ \ uint32x4x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld4q_u32(__p0) __extension__ ({ \ uint32x4x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 50); \ \ __ret.val[0] = 
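/*
 * The vld4_<t>/vld4q_<t> macros beginning above extend the de-interleaving
 * load to four vectors: element 4*i goes to val[0], 4*i+1 to val[1],
 * 4*i+2 to val[2], and 4*i+3 to val[3]. The q forms fill 128-bit vectors,
 * the plain forms 64-bit vectors. Typical use is unpacking RGBA pixels
 * (pointer is illustrative):
 *
 *   const uint8_t *rgba = ...;           // 64 bytes of R, G, B, A, ...
 *   uint8x16x4_t px = vld4q_u8(rgba);    // px.val[0]=R, val[1]=G, val[2]=B, val[3]=A
 */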
__builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_u16(__p0) __extension__ ({ \ uint16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld4q_u16(__p0) __extension__ ({ \ uint16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_s8(__p0) __extension__ ({ \ int8x16x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld4q_s8(__p0) __extension__ ({ \ int8x16x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_f32(__p0) __extension__ ({ \ float32x4x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld4q_f32(__p0) __extension__ ({ \ float32x4x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_s32(__p0) __extension__ ({ \ int32x4x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld4q_s32(__p0) __extension__ ({ \ int32x4x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_s16(__p0) __extension__ ({ \ int16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld4q_s16(__p0) __extension__ ({ \ int16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_u8(__p0) __extension__ ({ \ uint8x8x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld4_u8(__p0) __extension__ ({ \ uint8x8x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_u32(__p0) __extension__ ({ \ uint32x2x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld4_u32(__p0) __extension__ ({ \ uint32x2x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld4_u64(__p0) __extension__ ({ \ uint64x1x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld4_u16(__p0) __extension__ ({ \ uint16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld4_u16(__p0) __extension__ ({ \ uint16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_s8(__p0) __extension__ ({ \ int8x8x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld4_s8(__p0) __extension__ ({ \ int8x8x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_f32(__p0) __extension__ ({ \ float32x2x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld4_f32(__p0) __extension__ ({ \ float32x2x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_s32(__p0) __extension__ ({ \ int32x2x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld4_s32(__p0) __extension__ ({ \ int32x2x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 2); \ \ __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld4_s64(__p0) __extension__ ({ \ int64x1x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld4_s16(__p0) __extension__ ({ \ int16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld4_s16(__p0) __extension__ ({ \ int16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_p8(__p0) __extension__ ({ \ poly8x8x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ __ret; \ }) #else #define vld4_dup_p8(__p0) __extension__ ({ \ poly8x8x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_p16(__p0) __extension__ ({ \ poly16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ __ret; \ }) #else #define vld4_dup_p16(__p0) __extension__ ({ \ poly16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_p8(__p0) __extension__ ({ \ poly8x16x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ __ret; \ }) #else #define vld4q_dup_p8(__p0) __extension__ ({ \ poly8x16x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_p16(__p0) __extension__ ({ \ poly16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ __ret; \ }) #else #define vld4q_dup_p16(__p0) __extension__ ({ \ poly16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 
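/*
 * The vld4_dup_<t>/vld4q_dup_<t> macros in this region load one 4-element
 * structure and broadcast it, so val[i] ends up with every lane equal to
 * ptr[i]. Sketch (the coefficient array is an example):
 *
 *   float coeff[4] = {0.1f, 0.2f, 0.3f, 0.4f};
 *   float32x2x4_t c = vld4_dup_f32(coeff);   // c.val[1] = {0.2f, 0.2f}, etc.
 */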
3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_u8(__p0) __extension__ ({ \ uint8x16x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ __ret; \ }) #else #define vld4q_dup_u8(__p0) __extension__ ({ \ uint8x16x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_u32(__p0) __extension__ ({ \ uint32x4x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ __ret; \ }) #else #define vld4q_dup_u32(__p0) __extension__ ({ \ uint32x4x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_u64(__p0) __extension__ ({ \ uint64x2x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ __ret; \ }) #else #define vld4q_dup_u64(__p0) __extension__ ({ \ uint64x2x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_u16(__p0) __extension__ ({ \ uint16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ __ret; \ }) #else #define vld4q_dup_u16(__p0) __extension__ ({ \ uint16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_s8(__p0) __extension__ ({ \ int8x16x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ __ret; \ }) #else #define vld4q_dup_s8(__p0) __extension__ ({ \ int8x16x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_f32(__p0) __extension__ ({ \ float32x4x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ __ret; \ }) #else #define vld4q_dup_f32(__p0) __extension__ ({ \ float32x4x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_s32(__p0) __extension__ ({ \ int32x4x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ __ret; \ }) #else #define vld4q_dup_s32(__p0) __extension__ ({ \ int32x4x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_s64(__p0) __extension__ ({ \ int64x2x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld4q_dup_s64(__p0) __extension__ ({ \ int64x2x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_s16(__p0) __extension__ ({ \ int16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ __ret; \ }) #else #define vld4q_dup_s16(__p0) __extension__ ({ \ int16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_u8(__p0) __extension__ ({ \ uint8x8x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ __ret; \ }) #else #define vld4_dup_u8(__p0) __extension__ ({ \ uint8x8x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_u32(__p0) __extension__ ({ \ uint32x2x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ __ret; \ }) #else #define vld4_dup_u32(__p0) __extension__ ({ \ 
uint32x2x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld4_dup_u64(__p0) __extension__ ({ \ uint64x1x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld4_dup_u16(__p0) __extension__ ({ \ uint16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ __ret; \ }) #else #define vld4_dup_u16(__p0) __extension__ ({ \ uint16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_s8(__p0) __extension__ ({ \ int8x8x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ __ret; \ }) #else #define vld4_dup_s8(__p0) __extension__ ({ \ int8x8x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_f32(__p0) __extension__ ({ \ float32x2x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ __ret; \ }) #else #define vld4_dup_f32(__p0) __extension__ ({ \ float32x2x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_s32(__p0) __extension__ ({ \ int32x2x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ __ret; \ }) #else #define vld4_dup_s32(__p0) __extension__ ({ \ int32x2x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld4_dup_s64(__p0) __extension__ ({ \ int64x1x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld4_dup_s16(__p0) __extension__ ({ \ int16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ __ret; \ }) #else #define vld4_dup_s16(__p0) __extension__ ({ \ int16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); 
\ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x4_t __ret; \ poly8x8x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ __ret; \ }) #else #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x4_t __ret; \ poly8x8x4_t __s1 = __p1; \ poly8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x4_t __ret; \ poly16x4x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ __ret; \ }) #else #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x4_t __ret; \ poly16x4x4_t __s1 = __p1; \ poly16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x4_t __ret; \ poly16x8x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ __ret; \ }) #else #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x4_t __ret; \ poly16x8x4_t __s1 = __p1; \ poly16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], 
__s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x4_t __ret; \ uint32x4x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ __ret; \ }) #else #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x4_t __ret; \ uint32x4x4_t __s1 = __p1; \ uint32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x4_t __ret; \ uint16x8x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ __ret; \ }) #else #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x4_t __ret; \ uint16x8x4_t __s1 = __p1; \ uint16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x4_t __ret; \ float32x4x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ __ret; \ }) #else #define 
vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x4_t __ret; \ float32x4x4_t __s1 = __p1; \ float32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x4_t __ret; \ int32x4x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ __ret; \ }) #else #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x4_t __ret; \ int32x4x4_t __s1 = __p1; \ int32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x4_t __ret; \ int16x8x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ __ret; \ }) #else #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x4_t __ret; \ int16x8x4_t __s1 = __p1; \ int16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 
2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x4_t __ret; \ uint8x8x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ __ret; \ }) #else #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x4_t __ret; \ uint8x8x4_t __s1 = __p1; \ uint8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x4_t __ret; \ uint32x2x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ __ret; \ }) #else #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x4_t __ret; \ uint32x2x4_t __s1 = __p1; \ uint32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x4_t __ret; \ uint16x4x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ __ret; \ }) #else #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x4_t __ret; \ uint16x4x4_t __s1 = __p1; \ uint16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x4_t __ret; \ int8x8x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ __ret; \ }) #else #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x4_t __ret; \ int8x8x4_t __s1 = __p1; \ int8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x4_t __ret; \ float32x2x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ __ret; \ }) #else #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x4_t __ret; \ float32x2x4_t __s1 = __p1; \ float32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x4_t __ret; \ int32x2x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ __ret; \ }) #else #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x4_t __ret; \ int32x2x4_t __s1 = __p1; \ int32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], 
__s1.val[3], 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x4_t __ret; \ int16x4x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ __ret; \ }) #else #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x4_t __ret; \ int16x4x4_t __s1 = __p1; \ int16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 
3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) 
__builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = 
(uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); 
return __ret; } #else __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } 
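/* The vmla and vmlaq intrinsics in this section perform a lane-wise
 * multiply-accumulate: each lane of the result is __p0 + __p1 * __p2. As with
 * the other intrinsics in this header, the big-endian variants (the #else
 * branches guarded by __LITTLE_ENDIAN__) reverse each operand with
 * __builtin_shufflevector, compute in little-endian lane order, and reverse
 * the result back so that lane numbering stays consistent across endiannesses.
 *
 * Illustrative use (variable names are hypothetical):
 *   uint32x2_t acc = vmla_u32(acc0, x, y);   // acc[i] = acc0[i] + x[i] * y[i]
 */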
#else __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ 
\ uint32x4_t __ret_42; \ uint32x4_t __s0_42 = __p0_42; \ uint32x4_t __s1_42 = __p1_42; \ uint32x2_t __s2_42 = __p2_42; \ __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \ __ret_42; \ }) #else #define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \ uint32x4_t __ret_43; \ uint32x4_t __s0_43 = __p0_43; \ uint32x4_t __s1_43 = __p1_43; \ uint32x2_t __s2_43 = __p2_43; \ uint32x4_t __rev0_43; __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \ uint32x4_t __rev1_43; __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \ uint32x2_t __rev2_43; __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \ __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \ __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \ __ret_43; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \ uint16x8_t __ret_44; \ uint16x8_t __s0_44 = __p0_44; \ uint16x8_t __s1_44 = __p1_44; \ uint16x4_t __s2_44 = __p2_44; \ __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \ __ret_44; \ }) #else #define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \ uint16x8_t __ret_45; \ uint16x8_t __s0_45 = __p0_45; \ uint16x8_t __s1_45 = __p1_45; \ uint16x4_t __s2_45 = __p2_45; \ uint16x8_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_45; __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x4_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \ __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \ __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_45; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ float32x4_t __ret_46; \ float32x4_t __s0_46 = __p0_46; \ float32x4_t __s1_46 = __p1_46; \ float32x2_t __s2_46 = __p2_46; \ __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \ __ret_46; \ }) #else #define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ float32x4_t __ret_47; \ float32x4_t __s0_47 = __p0_47; \ float32x4_t __s1_47 = __p1_47; \ float32x2_t __s2_47 = __p2_47; \ float32x4_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \ float32x4_t __rev1_47; __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \ float32x2_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \ __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \ __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \ __ret_47; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ int32x4_t __ret_48; \ int32x4_t __s0_48 = __p0_48; \ int32x4_t __s1_48 = __p1_48; \ int32x2_t __s2_48 = __p2_48; \ __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \ __ret_48; \ }) #else #define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ int32x4_t __ret_49; \ int32x4_t __s0_49 = __p0_49; \ int32x4_t __s1_49 = __p1_49; \ int32x2_t __s2_49 = __p2_49; \ int32x4_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \ int32x4_t __rev1_49; __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \ int32x2_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, 
__s2_49, 1, 0); \ __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \ __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \ __ret_49; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ int16x8_t __ret_50; \ int16x8_t __s0_50 = __p0_50; \ int16x8_t __s1_50 = __p1_50; \ int16x4_t __s2_50 = __p2_50; \ __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \ __ret_50; \ }) #else #define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \ int16x8_t __ret_51; \ int16x8_t __s0_51 = __p0_51; \ int16x8_t __s1_51 = __p1_51; \ int16x4_t __s2_51 = __p2_51; \ int16x8_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_51; __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \ __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \ __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_51; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ uint32x2_t __ret_52; \ uint32x2_t __s0_52 = __p0_52; \ uint32x2_t __s1_52 = __p1_52; \ uint32x2_t __s2_52 = __p2_52; \ __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \ __ret_52; \ }) #else #define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ uint32x2_t __ret_53; \ uint32x2_t __s0_53 = __p0_53; \ uint32x2_t __s1_53 = __p1_53; \ uint32x2_t __s2_53 = __p2_53; \ uint32x2_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \ uint32x2_t __rev1_53; __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \ uint32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \ __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \ __ret_53; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ uint16x4_t __ret_54; \ uint16x4_t __s0_54 = __p0_54; \ uint16x4_t __s1_54 = __p1_54; \ uint16x4_t __s2_54 = __p2_54; \ __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \ __ret_54; \ }) #else #define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ uint16x4_t __ret_55; \ uint16x4_t __s0_55 = __p0_55; \ uint16x4_t __s1_55 = __p1_55; \ uint16x4_t __s2_55 = __p2_55; \ uint16x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \ uint16x4_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \ uint16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \ __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \ __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \ __ret_55; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ float32x2_t __ret_56; \ float32x2_t __s0_56 = __p0_56; \ float32x2_t __s1_56 = __p1_56; \ float32x2_t __s2_56 = __p2_56; \ __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \ __ret_56; \ }) #else #define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ float32x2_t __ret_57; \ float32x2_t __s0_57 = __p0_57; \ float32x2_t __s1_57 = __p1_57; \ float32x2_t __s2_57 
= __p2_57; \ float32x2_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \ float32x2_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \ float32x2_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \ __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \ __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \ __ret_57; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ int32x2_t __ret_58; \ int32x2_t __s0_58 = __p0_58; \ int32x2_t __s1_58 = __p1_58; \ int32x2_t __s2_58 = __p2_58; \ __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \ __ret_58; \ }) #else #define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ int32x2_t __ret_59; \ int32x2_t __s0_59 = __p0_59; \ int32x2_t __s1_59 = __p1_59; \ int32x2_t __s2_59 = __p2_59; \ int32x2_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \ int32x2_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \ int32x2_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \ __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \ __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \ __ret_59; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ int16x4_t __ret_60; \ int16x4_t __s0_60 = __p0_60; \ int16x4_t __s1_60 = __p1_60; \ int16x4_t __s2_60 = __p2_60; \ __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \ __ret_60; \ }) #else #define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ int16x4_t __ret_61; \ int16x4_t __s0_61 = __p0_61; \ int16x4_t __s1_61 = __p1_61; \ int16x4_t __s2_61 = __p2_61; \ int16x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \ int16x4_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \ int16x4_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \ __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \ __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \ __ret_61; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 
2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; return __ret; } #else __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; return __ret; } #else __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1 * (float32x2_t) 
{__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; return __ret; } #else __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ uint32x4_t __ret_62; \ uint32x4_t __s0_62 = __p0_62; \ uint32x4_t __s1_62 = __p1_62; \ uint32x2_t __s2_62 = __p2_62; \ __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \ __ret_62; \ }) #else #define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ uint32x4_t __ret_63; \ uint32x4_t __s0_63 = __p0_63; \ uint32x4_t 
__s1_63 = __p1_63; \ uint32x2_t __s2_63 = __p2_63; \ uint32x4_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \ uint32x4_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \ uint32x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \ __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \ __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \ __ret_63; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ uint16x8_t __ret_64; \ uint16x8_t __s0_64 = __p0_64; \ uint16x8_t __s1_64 = __p1_64; \ uint16x4_t __s2_64 = __p2_64; \ __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \ __ret_64; \ }) #else #define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ uint16x8_t __ret_65; \ uint16x8_t __s0_65 = __p0_65; \ uint16x8_t __s1_65 = __p1_65; \ uint16x4_t __s2_65 = __p2_65; \ uint16x8_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x4_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \ __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \ __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_65; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ float32x4_t __ret_66; \ float32x4_t __s0_66 = __p0_66; \ float32x4_t __s1_66 = __p1_66; \ float32x2_t __s2_66 = __p2_66; \ __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \ __ret_66; \ }) #else #define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ float32x4_t __ret_67; \ float32x4_t __s0_67 = __p0_67; \ float32x4_t __s1_67 = __p1_67; \ float32x2_t __s2_67 = __p2_67; \ float32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ float32x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ float32x2_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \ __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \ __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ __ret_67; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ int32x4_t __ret_68; \ int32x4_t __s0_68 = __p0_68; \ int32x4_t __s1_68 = __p1_68; \ int32x2_t __s2_68 = __p2_68; \ __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \ __ret_68; \ }) #else #define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ int32x4_t __ret_69; \ int32x4_t __s0_69 = __p0_69; \ int32x4_t __s1_69 = __p1_69; \ int32x2_t __s2_69 = __p2_69; \ int32x4_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \ int32x4_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \ int32x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \ __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \ __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \ __ret_69; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ int16x8_t __ret_70; \ int16x8_t __s0_70 = __p0_70; \ int16x8_t 
__s1_70 = __p1_70; \ int16x4_t __s2_70 = __p2_70; \ __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \ __ret_70; \ }) #else #define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ int16x8_t __ret_71; \ int16x8_t __s0_71 = __p0_71; \ int16x8_t __s1_71 = __p1_71; \ int16x4_t __s2_71 = __p2_71; \ int16x8_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \ __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \ __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_71; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ uint32x2_t __ret_72; \ uint32x2_t __s0_72 = __p0_72; \ uint32x2_t __s1_72 = __p1_72; \ uint32x2_t __s2_72 = __p2_72; \ __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \ __ret_72; \ }) #else #define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ uint32x2_t __ret_73; \ uint32x2_t __s0_73 = __p0_73; \ uint32x2_t __s1_73 = __p1_73; \ uint32x2_t __s2_73 = __p2_73; \ uint32x2_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \ uint32x2_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \ uint32x2_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \ __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \ __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \ __ret_73; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ uint16x4_t __ret_74; \ uint16x4_t __s0_74 = __p0_74; \ uint16x4_t __s1_74 = __p1_74; \ uint16x4_t __s2_74 = __p2_74; \ __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \ __ret_74; \ }) #else #define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ uint16x4_t __ret_75; \ uint16x4_t __s0_75 = __p0_75; \ uint16x4_t __s1_75 = __p1_75; \ uint16x4_t __s2_75 = __p2_75; \ uint16x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \ uint16x4_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \ uint16x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \ __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \ __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \ __ret_75; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ float32x2_t __ret_76; \ float32x2_t __s0_76 = __p0_76; \ float32x2_t __s1_76 = __p1_76; \ float32x2_t __s2_76 = __p2_76; \ __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \ __ret_76; \ }) #else #define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ float32x2_t __ret_77; \ float32x2_t __s0_77 = __p0_77; \ float32x2_t __s1_77 = __p1_77; \ float32x2_t __s2_77 = __p2_77; \ float32x2_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \ float32x2_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \ float32x2_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \ __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \ 
__ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \ __ret_77; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ int32x2_t __ret_78; \ int32x2_t __s0_78 = __p0_78; \ int32x2_t __s1_78 = __p1_78; \ int32x2_t __s2_78 = __p2_78; \ __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \ __ret_78; \ }) #else #define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ int32x2_t __ret_79; \ int32x2_t __s0_79 = __p0_79; \ int32x2_t __s1_79 = __p1_79; \ int32x2_t __s2_79 = __p2_79; \ int32x2_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \ int32x2_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \ int32x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \ __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \ __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \ __ret_79; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ int16x4_t __ret_80; \ int16x4_t __s0_80 = __p0_80; \ int16x4_t __s1_80 = __p1_80; \ int16x4_t __s2_80 = __p2_80; \ __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \ __ret_80; \ }) #else #define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ int16x4_t __ret_81; \ int16x4_t __s0_81 = __p0_81; \ int16x4_t __s1_81 = __p1_81; \ int16x4_t __s2_81 = __p2_81; \ int16x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \ int16x4_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \ int16x4_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \ __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \ __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \ __ret_81; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; 
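/* In the __LITTLE_ENDIAN__ branches the multiply-accumulate (vmla...) and
 * multiply-subtract (vmls...) forms are expressed directly with the vector
 * +, - and * operators; the _lane_ variants broadcast the selected lane via
 * splatq_lane_... / splat_lane_..., and the _n_ variants via a compound
 * literal. In the big-endian branches, as here, each operand is first
 * reversed with __builtin_shufflevector, the arithmetic is done in
 * architectural lane order (using the __noswap_ splat helpers wherever a
 * lane index is involved), and the result is reversed back, so the
 * observable behaviour is the same on either endianness. */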
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; return __ret; } #else __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2}; return __ret; } #else __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; return __ret; } #else __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t 
__p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vmov_n_p8(poly8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai poly8x8_t vmov_n_p8(poly8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vmov_n_p16(poly16_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai poly16x4_t vmov_n_p16(poly16_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vmovq_n_p8(poly8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai poly8x16_t vmovq_n_p8(poly8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vmovq_n_p16(poly16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai poly16x8_t vmovq_n_p16(poly16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vmovq_n_u8(uint8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai uint8x16_t vmovq_n_u8(uint8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmovq_n_u32(uint32_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai uint32x4_t vmovq_n_u32(uint32_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmovq_n_u64(uint64_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) 
{__p0, __p0}; return __ret; } #else __ai uint64x2_t vmovq_n_u64(uint64_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmovq_n_u16(uint16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai uint16x8_t vmovq_n_u16(uint16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vmovq_n_s8(int8_t __p0) { int8x16_t __ret; __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai int8x16_t vmovq_n_s8(int8_t __p0) { int8x16_t __ret; __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmovq_n_f32(float32_t __p0) { float32x4_t __ret; __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai float32x4_t vmovq_n_f32(float32_t __p0) { float32x4_t __ret; __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmovq_n_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret; \ }) #else #define vmovq_n_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmovq_n_s32(int32_t __p0) { int32x4_t __ret; __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai int32x4_t vmovq_n_s32(int32_t __p0) { int32x4_t __ret; __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmovq_n_s64(int64_t __p0) { int64x2_t __ret; __ret = (int64x2_t) {__p0, __p0}; return __ret; } #else __ai int64x2_t vmovq_n_s64(int64_t __p0) { int64x2_t __ret; __ret = (int64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmovq_n_s16(int16_t __p0) { int16x8_t __ret; __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai int16x8_t vmovq_n_s16(int16_t __p0) { int16x8_t __ret; __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vmov_n_u8(uint8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai uint8x8_t vmov_n_u8(uint8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmov_n_u32(uint32_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) {__p0, __p0}; return __ret; } 
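/* vmov_n_ / vmovq_n_ broadcast a scalar into every lane, e.g. (illustrative
 * only, not part of the generated header):
 *   uint32x2_t v = vmov_n_u32(7u);   // v = {7, 7}
 * For a uniform splat the big-endian lane reversal below does not change the
 * value; it is emitted to keep the pattern uniform across intrinsics. */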
#else __ai uint32x2_t vmov_n_u32(uint32_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vmov_n_u64(uint64_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmov_n_u16(uint16_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai uint16x4_t vmov_n_u16(uint16_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vmov_n_s8(int8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai int8x8_t vmov_n_s8(int8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmov_n_f32(float32_t __p0) { float32x2_t __ret; __ret = (float32x2_t) {__p0, __p0}; return __ret; } #else __ai float32x2_t vmov_n_f32(float32_t __p0) { float32x2_t __ret; __ret = (float32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmov_n_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret; \ }) #else #define vmov_n_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmov_n_s32(int32_t __p0) { int32x2_t __ret; __ret = (int32x2_t) {__p0, __p0}; return __ret; } #else __ai int32x2_t vmov_n_s32(int32_t __p0) { int32x2_t __ret; __ret = (int32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vmov_n_s64(int64_t __p0) { int64x1_t __ret; __ret = (int64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmov_n_s16(int16_t __p0) { int16x4_t __ret; __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai int16x4_t vmov_n_s16(int16_t __p0) { int16x4_t __ret; __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmovl_u8(uint8x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); return __ret; } #else __ai uint16x8_t vmovl_u8(uint8x8_t __p0) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmovl_u32(uint32x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); return __ret; } #else __ai uint64x2_t vmovl_u32(uint32x2_t __p0) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmovl_u16(uint16x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); return __ret; } #else __ai uint32x4_t vmovl_u16(uint16x4_t __p0) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmovl_s8(int8x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); return __ret; } #else __ai int16x8_t vmovl_s8(int8x8_t __p0) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmovl_s32(int32x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); return __ret; } #else __ai int64x2_t vmovl_s32(int32x2_t __p0) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmovl_s16(int16x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); return __ret; } #else __ai int32x4_t vmovl_s16(int16x4_t __p0) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); return __ret; } #else __ai uint16x4_t vmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); return __ret; } #else __ai uint32x2_t vmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); 
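/* vmovl_ widens each lane to twice the element width and vmovn_ narrows it
 * to half; both lower to the generic __builtin_neon_vmovl_v and
 * __builtin_neon_vmovn_v builtins, whose trailing integer argument is a NEON
 * type code describing the result vector. The __noswap_ copies defined in
 * the big-endian branches exist for use by other intrinsics whose operands
 * have already been lane-reversed. */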
__ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); return __ret; } #else __ai uint8x8_t vmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmovn_s32(int32x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); return __ret; } #else __ai int16x4_t vmovn_s32(int32x4_t __p0) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmovn_s64(int64x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); return __ret; } #else __ai int32x2_t vmovn_s64(int64x2_t __p0) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vmovn_s16(int16x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); return __ret; } #else __ai int8x8_t vmovn_s16(int16x8_t __p0) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai uint32x4_t vmulq_u32(uint32x4_t __p0, 
uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 * __p1; 
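/* Plain vmul_ / vmulq_ on integer and float vectors is just the vector *
 * operator; only the polynomial forms (vmul_p8 / vmulq_p8 further below) go
 * through __builtin_neon_vmul_v / __builtin_neon_vmulq_v, since polynomial
 * (carry-less) multiplication has no C operator equivalent. */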
return __ret; } #else __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 
36); return __ret; } #else __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \ uint32x4_t __ret_82; \ uint32x4_t __s0_82 = __p0_82; \ uint32x2_t __s1_82 = __p1_82; \ __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \ __ret_82; \ }) #else #define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \ uint32x4_t __ret_83; \ uint32x4_t __s0_83 = __p0_83; \ uint32x2_t __s1_83 = __p1_83; \ uint32x4_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \ uint32x2_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \ __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \ __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \ __ret_83; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \ uint16x8_t __ret_84; \ uint16x8_t __s0_84 = __p0_84; \ uint16x4_t __s1_84 = __p1_84; \ __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \ __ret_84; \ }) #else #define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \ uint16x8_t __ret_85; \ uint16x8_t __s0_85 = __p0_85; \ uint16x4_t __s1_85 = __p1_85; \ uint16x8_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x4_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \ __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \ __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_85; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \ float32x4_t __ret_86; \ float32x4_t __s0_86 = __p0_86; \ float32x2_t __s1_86 = __p1_86; \ __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \ __ret_86; \ }) #else #define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \ float32x4_t __ret_87; \ float32x4_t __s0_87 = __p0_87; \ float32x2_t __s1_87 = __p1_87; \ float32x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ float32x2_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \ __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \ __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ __ret_87; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \ int32x4_t __ret_88; \ int32x4_t __s0_88 = __p0_88; \ int32x2_t __s1_88 = __p1_88; \ __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \ __ret_88; \ }) #else #define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \ int32x4_t __ret_89; \ int32x4_t __s0_89 = __p0_89; \ int32x2_t __s1_89 = __p1_89; \ int32x4_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \ int32x2_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \ __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \ __ret_89 = 
__builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \ __ret_89; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \ int16x8_t __ret_90; \ int16x8_t __s0_90 = __p0_90; \ int16x4_t __s1_90 = __p1_90; \ __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \ __ret_90; \ }) #else #define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \ int16x8_t __ret_91; \ int16x8_t __s0_91 = __p0_91; \ int16x4_t __s1_91 = __p1_91; \ int16x8_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \ __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \ __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_91; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \ uint32x2_t __ret_92; \ uint32x2_t __s0_92 = __p0_92; \ uint32x2_t __s1_92 = __p1_92; \ __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \ __ret_92; \ }) #else #define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \ uint32x2_t __ret_93; \ uint32x2_t __s0_93 = __p0_93; \ uint32x2_t __s1_93 = __p1_93; \ uint32x2_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \ uint32x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \ __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \ __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \ __ret_93; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \ uint16x4_t __ret_94; \ uint16x4_t __s0_94 = __p0_94; \ uint16x4_t __s1_94 = __p1_94; \ __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \ __ret_94; \ }) #else #define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \ uint16x4_t __ret_95; \ uint16x4_t __s0_95 = __p0_95; \ uint16x4_t __s1_95 = __p1_95; \ uint16x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \ uint16x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \ __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \ __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \ __ret_95; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \ float32x2_t __ret_96; \ float32x2_t __s0_96 = __p0_96; \ float32x2_t __s1_96 = __p1_96; \ __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \ __ret_96; \ }) #else #define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \ float32x2_t __ret_97; \ float32x2_t __s0_97 = __p0_97; \ float32x2_t __s1_97 = __p1_97; \ float32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \ float32x2_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \ __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \ __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \ __ret_97; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \ int32x2_t __ret_98; \ int32x2_t __s0_98 = __p0_98; \ int32x2_t __s1_98 = __p1_98; \ __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \ __ret_98; \ }) #else #define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \ int32x2_t __ret_99; \ int32x2_t __s0_99 = __p0_99; \ int32x2_t __s1_99 = __p1_99; \ int32x2_t __rev0_99; __rev0_99 = 
__builtin_shufflevector(__s0_99, __s0_99, 1, 0); \ int32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \ __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \ __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \ __ret_99; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \ int16x4_t __ret_100; \ int16x4_t __s0_100 = __p0_100; \ int16x4_t __s1_100 = __p1_100; \ __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \ __ret_100; \ }) #else #define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \ int16x4_t __ret_101; \ int16x4_t __s0_101 = __p0_101; \ int16x4_t __s1_101 = __p1_101; \ int16x4_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \ int16x4_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \ __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \ __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \ __ret_101; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { uint32x4_t __ret; __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { uint16x8_t __ret; __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; return __ret; } #else __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { float32x4_t __ret; __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; return __ret; } #else __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 
3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { uint32x2_t __ret; __ret = __p0 * (uint32x2_t) {__p1, __p1}; return __ret; } #else __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __rev0 * (uint32x2_t) {__p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { uint16x4_t __ret; __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { float32x2_t __ret; __ret = __p0 * (float32x2_t) {__p1, __p1}; return __ret; } #else __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __rev0 * (float32x2_t) {__p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; __ret = __p0 * (int32x2_t) {__p1, __p1}; return __ret; } #else __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __rev0 * (int32x2_t) {__p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { poly16x8_t __ret; __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); return __ret; } #else __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { poly16x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { poly16x8_t __ret; __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); return __ret; } #else __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 
2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); return __ret; } #else __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); return __ret; } #else __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); return __ret; } #else __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #else __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t 
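/* The vmull_* intrinsics defined here are widening multiplies: each pair of
 * source lanes is multiplied into a destination lane of twice the width
 * (e.g. int16x4_t x int16x4_t -> int32x4_t), so the products cannot wrap.
 * As elsewhere in this header, the big-endian ("#else") definitions reverse
 * lane order with __builtin_shufflevector before and after the builtin so
 * that lane indices behave the same as in the little-endian definitions.
 * A minimal usage sketch, assuming NEON is enabled; the variable names are
 * illustrative only:
 *
 *   int16x4_t a = vdup_n_s16(1000);
 *   int16x4_t b = vdup_n_s16(2000);
 *   int32x4_t wide = vmull_s16(a, b);   // every lane holds 2000000
 */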
vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #else __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \ uint64x2_t __ret_102; \ uint32x2_t __s0_102 = __p0_102; \ uint32x2_t __s1_102 = __p1_102; \ __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \ __ret_102; \ }) #else #define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \ uint64x2_t __ret_103; \ uint32x2_t __s0_103 = __p0_103; \ uint32x2_t __s1_103 = __p1_103; \ uint32x2_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \ uint32x2_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \ __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \ __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \ __ret_103; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \ uint32x4_t __ret_104; \ uint16x4_t __s0_104 = __p0_104; \ uint16x4_t __s1_104 = __p1_104; \ __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \ __ret_104; \ }) #else #define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \ uint32x4_t __ret_105; \ uint16x4_t __s0_105 = __p0_105; \ uint16x4_t __s1_105 = __p1_105; \ uint16x4_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \ uint16x4_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \ __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, __p2_105)); \ __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \ __ret_105; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \ int64x2_t __ret_106; \ int32x2_t __s0_106 = __p0_106; \ int32x2_t __s1_106 = __p1_106; \ __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \ __ret_106; \ }) #else #define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \ int64x2_t __ret_107; \ int32x2_t __s0_107 = __p0_107; \ int32x2_t __s1_107 = __p1_107; \ int32x2_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \ int32x2_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \ __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \ __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \ __ret_107; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \ int32x4_t __ret_108; \ int16x4_t __s0_108 = __p0_108; \ int16x4_t __s1_108 = __p1_108; \ __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \ __ret_108; \ }) #else #define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \ int32x4_t __ret_109; \ int16x4_t 
__s0_109 = __p0_109; \ int16x4_t __s1_109 = __p1_109; \ int16x4_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \ int16x4_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \ __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \ __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \ __ret_109; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { uint64x2_t __ret; __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); return __ret; } #else __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { uint64x2_t __ret; __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { uint32x4_t __ret; __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { uint32x4_t __ret; __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vmvn_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = ~__p0; return __ret; } #else __ai poly8x8_t vmvn_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = ~__p0; return __ret; } #else __ai poly8x16_t 
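/* The vmull_n_* and vmull_lane_* forms above combine the widening multiply
 * with a scalar (or single-lane) second operand that is broadcast across
 * the vector before multiplying.  A minimal sketch, assuming NEON is
 * enabled; names are illustrative only:
 *
 *   int16x4_t coeffs = vdup_n_s16(3);
 *   int32x4_t scaled = vmull_n_s16(coeffs, 7);             // every lane: 21
 *   int32x4_t picked = vmull_lane_s16(coeffs, coeffs, 2);  // 3 * lane 2 (3) = 9
 */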
vmvnq_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = ~__p0; return __ret; } #else __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = ~__p0; return __ret; } #else __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { uint16x8_t __ret; __ret = ~__p0; return __ret; } #else __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vmvnq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = ~__p0; return __ret; } #else __ai int8x16_t vmvnq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmvnq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = ~__p0; return __ret; } #else __ai int32x4_t vmvnq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmvnq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = ~__p0; return __ret; } #else __ai int16x8_t vmvnq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vmvn_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = ~__p0; return __ret; } #else __ai uint8x8_t vmvn_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vmvn_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = ~__p0; return __ret; } #else __ai uint32x2_t vmvn_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vmvn_u16(uint16x4_t __p0) { uint16x4_t __ret; __ret = ~__p0; return __ret; } #else __ai uint16x4_t 
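/* vmvn/vmvnq compute the bitwise complement of every lane (the plain C ~
 * operator applied to the vector), which is handy for inverting comparison
 * masks.  A minimal sketch, assuming NEON is enabled; names are
 * illustrative only:
 *
 *   uint8x8_t mask = vdup_n_u8(0x0F);
 *   uint8x8_t inv  = vmvn_u8(mask);   // every lane becomes 0xF0
 */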
vmvn_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vmvn_s8(int8x8_t __p0) { int8x8_t __ret; __ret = ~__p0; return __ret; } #else __ai int8x8_t vmvn_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vmvn_s32(int32x2_t __p0) { int32x2_t __ret; __ret = ~__p0; return __ret; } #else __ai int32x2_t vmvn_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vmvn_s16(int16x4_t __p0) { int16x4_t __ret; __ret = ~__p0; return __ret; } #else __ai int16x4_t vmvn_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vnegq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = -__p0; return __ret; } #else __ai int8x16_t vnegq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vnegq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = -__p0; return __ret; } #else __ai float32x4_t vnegq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vnegq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = -__p0; return __ret; } #else __ai int32x4_t vnegq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vnegq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = -__p0; return __ret; } #else __ai int16x8_t vnegq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vneg_s8(int8x8_t __p0) { int8x8_t __ret; __ret = -__p0; return __ret; } #else __ai int8x8_t vneg_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vneg_f32(float32x2_t __p0) { float32x2_t __ret; __ret = -__p0; return __ret; } #else __ai float32x2_t vneg_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return 
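/* vneg/vnegq negate every lane (the C unary minus applied to the vector);
 * the float variants simply flip the sign of each element.  A minimal
 * sketch, assuming NEON is enabled; names are illustrative only:
 *
 *   float32x4_t x = vdupq_n_f32(1.5f);
 *   float32x4_t y = vnegq_f32(x);   // every lane becomes -1.5f
 */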
__ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vneg_s32(int32x2_t __p0) { int32x2_t __ret; __ret = -__p0; return __ret; } #else __ai int32x2_t vneg_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vneg_s16(int16x4_t __p0) { int16x4_t __ret; __ret = -__p0; return __ret; } #else __ai int16x4_t vneg_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 | ~__p1; return 
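/* vorn/vornq compute "OR NOT": __p0 | ~__p1, i.e. a bit is set in the result
 * wherever the first operand has it set or the second operand has it clear.
 * A minimal sketch, assuming NEON is enabled; names are illustrative only:
 *
 *   uint8x8_t a = vdup_n_u8(0x0F);
 *   uint8x8_t b = vdup_n_u8(0x33);
 *   uint8x8_t r = vorn_u8(a, b);   // 0x0F | ~0x33 = 0x0F | 0xCC = 0xCF
 */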
__ret; } #else __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 | ~__p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 | ~__p1; return 
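/* The 64x1 variants (e.g. vorn_u64 and vorn_s64) sit outside the
 * __LITTLE_ENDIAN__ split: a one-lane vector has no lane order to reverse,
 * so a single definition serves both endiannesses. */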
__ret; } #else __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 | ~__p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 | ~__p1; return __ret; } #else __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 | __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, 
__ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 | __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 | __p1; return __ret; } #else __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) 
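/* vpadal/vpadalq ("pairwise add and accumulate long") add adjacent pairs of
 * lanes from the second operand, widened to twice their element size, into
 * the corresponding lanes of the first operand; this is the usual building
 * block for summing many narrow elements without overflow.  A minimal
 * sketch, assuming NEON is enabled; names are illustrative only:
 *
 *   uint16x8_t acc   = vdupq_n_u16(0);
 *   uint8x16_t bytes = vdupq_n_u8(3);
 *   acc = vpadalq_u8(acc, bytes);   // every lane of acc becomes 3 + 3 = 6
 */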
__builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #else __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { uint64x1_t __ret; uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x4_t) 
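/* Note that vpadal_u32 and vpadal_s32 return a one-lane 64-bit vector, so
 * their big-endian definitions only reverse the two-lane source; the
 * single-lane result needs no lane reversal. */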
__builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #else __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { int64x1_t __ret; int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = 
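/* vpadd_* ("pairwise add") adds adjacent lanes of its two 64-bit operands
 * and packs the sums into one result, which makes it a convenient step in
 * horizontal reductions.  A minimal sketch that sums a float32x4_t,
 * assuming NEON is enabled; names are illustrative only:
 *
 *   float32x4_t v = vdupq_n_f32(1.0f);
 *   float32x2_t s = vpadd_f32(vget_low_f32(v), vget_high_f32(v)); // {2, 2}
 *   s = vpadd_f32(s, s);                                          // {4, 4}
 *   float sum = vget_lane_f32(s, 0);                              // 4.0f
 */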
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vpaddlq_s8(int8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vpaddlq_s8(int8x16_t 
__p0) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vpaddlq_s32(int32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vpaddlq_s32(int32x4_t __p0) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vpaddlq_s16(int16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vpaddlq_s16(int16x8_t __p0) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { uint16x4_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); return __ret; } #else __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { uint64x1_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { uint32x2_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vpaddl_s8(int8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vpaddl_s8(int8x8_t __p0) { int16x4_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x1_t vpaddl_s32(int32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); return __ret; } #else __ai int64x1_t vpaddl_s32(int32x2_t __p0) { int64x1_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vpaddl_s16(int16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t) 
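/* vpaddl/vpaddlq ("pairwise add long") add adjacent lanes of a single vector
 * and widen each sum to twice the element size; chaining the result into
 * vpadal gives overflow-free widening reductions.  A minimal sketch,
 * assuming NEON is enabled; names are illustrative only:
 *
 *   uint8x8_t  bytes = vdup_n_u8(200);
 *   uint16x4_t sums  = vpaddl_u8(bytes);   // every lane is 400
 */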
__builtin_neon_vpaddl_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vpaddl_s16(int16x4_t __p0) { int32x2_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, 
(int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { 
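  /* Added descriptive comment: vpmin_f32 is a pairwise minimum. It returns
   * {min(__p0[0], __p0[1]), min(__p1[0], __p1[1])}, i.e. each result lane is
   * the smaller element of one input pair. The #else copy of this function
   * below is identical except that it reverses lane order around the builtin
   * call so lane numbering stays consistent on big-endian targets.
   * Illustrative use with hypothetical values (not part of this header):
   *   float32x2_t a = {1.0f, 4.0f}, b = {3.0f, 2.0f};
   *   float32x2_t m = vpmin_f32(a, b);   // m = {1.0f, 2.0f}
   */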
float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqabsq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); return __ret; } #else __ai int8x16_t vqabsq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqabsq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vqabsq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqabsq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vqabsq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqabs_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); return __ret; } #else __ai int8x8_t vqabs_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqabs_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vqabs_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqabs_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vqabs_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 
3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; } #else __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, 
__p2, 1, 0); __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; } #else __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \ int64x2_t __ret_110; \ int64x2_t __s0_110 = __p0_110; \ int32x2_t __s1_110 = __p1_110; \ int32x2_t __s2_110 = __p2_110; \ __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \ __ret_110; \ }) #else #define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \ int64x2_t __ret_111; \ int64x2_t __s0_111 = __p0_111; \ int32x2_t __s1_111 = __p1_111; \ int32x2_t __s2_111 = __p2_111; \ int64x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ int32x2_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \ __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \ __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ __ret_111; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \ int32x4_t __ret_112; \ int32x4_t __s0_112 = __p0_112; \ int16x4_t __s1_112 = __p1_112; \ int16x4_t __s2_112 = __p2_112; \ __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \ __ret_112; \ }) #else #define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \ int32x4_t __ret_113; \ int32x4_t __s0_113 = __p0_113; \ int16x4_t __s1_113 = __p1_113; \ int16x4_t __s2_113 = __p2_113; \ int32x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ int16x4_t __rev2_113; __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \ __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \ __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ __ret_113; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vqdmlal_s32(__p0, 
__p1, (int32x2_t) {__p2, __p2}); return __ret; } #else __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; } #else __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; } #else __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \ int64x2_t __ret_114; \ int64x2_t __s0_114 = __p0_114; \ int32x2_t __s1_114 = __p1_114; \ 
int32x2_t __s2_114 = __p2_114; \ __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \ __ret_114; \ }) #else #define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \ int64x2_t __ret_115; \ int64x2_t __s0_115 = __p0_115; \ int32x2_t __s1_115 = __p1_115; \ int32x2_t __s2_115 = __p2_115; \ int64x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ int32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ int32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \ __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \ __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ __ret_115; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \ int32x4_t __ret_116; \ int32x4_t __s0_116 = __p0_116; \ int16x4_t __s1_116 = __p1_116; \ int16x4_t __s2_116 = __p2_116; \ __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \ __ret_116; \ }) #else #define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \ int32x4_t __ret_117; \ int32x4_t __s0_117 = __p0_117; \ int16x4_t __s1_117 = __p1_117; \ int16x4_t __s2_117 = __p2_117; \ int32x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ int16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ int16x4_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \ __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \ __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ __ret_117; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; } #else __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) 
__builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); 
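  /* Added comment: the shuffle above restores the caller's lane order on
   * big-endian targets before the value is returned. vqdmulhq_n_s32 itself
   * multiplies every lane of __p0 by the scalar __p1, doubles the 64-bit
   * product and keeps its high 32 bits, saturating on overflow (a Q31
   * fixed-point multiply). Illustrative arithmetic with hypothetical values:
   * a lane of 1<<30 times __p1 of 1<<30 gives (2 * 2^60) >> 32 = 1<<29. */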
return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); return __ret; } #else __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #else __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #else __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \ int64x2_t __ret_118; \ int32x2_t __s0_118 = __p0_118; \ int32x2_t __s1_118 = __p1_118; \ __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \ __ret_118; \ }) #else #define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \ int64x2_t __ret_119; \ int32x2_t __s0_119 = __p0_119; \ int32x2_t 
__s1_119 = __p1_119; \ int32x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \ __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ __ret_119; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \ int32x4_t __ret_120; \ int16x4_t __s0_120 = __p0_120; \ int16x4_t __s1_120 = __p1_120; \ __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \ __ret_120; \ }) #else #define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \ int32x4_t __ret_121; \ int16x4_t __s0_121 = __p0_121; \ int16x4_t __s1_121 = __p1_121; \ int16x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \ __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ __ret_121; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); return __ret; } #else __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); return __ret; } #else __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); return __ret; } #else __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqmovn_s32(int32x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); return __ret; } #else __ai int16x4_t vqmovn_s32(int32x4_t __p0) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqmovn_s64(int64x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); return __ret; } #else __ai int32x2_t vqmovn_s64(int64x2_t __p0) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqmovn_s16(int16x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); return __ret; } #else __ai int8x8_t vqmovn_s16(int16x8_t __p0) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vqmovun_s32(int32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); return __ret; } #else __ai uint16x4_t vqmovun_s32(int32x4_t __p0) { uint16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vqmovun_s64(int64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); return __ret; } #else __ai uint32x2_t vqmovun_s64(int64x2_t __p0) { 
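  /* Added descriptive comment: vqmovun_s64 narrows each signed 64-bit lane
   * to an unsigned 32-bit lane with saturation, so negative inputs clamp to
   * 0 and values above UINT32_MAX clamp to 0xFFFFFFFF. The shufflevector
   * calls in this big-endian variant only re-order lanes around the builtin.
   * Illustrative use with hypothetical values (not part of this header):
   *   int64x2_t v = {-5, (int64_t)1 << 40};
   *   uint32x2_t n = vqmovun_s64(v);   // n = {0u, 0xFFFFFFFFu}
   */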
uint32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqmovun_s16(int16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); return __ret; } #else __ai uint8x8_t vqmovun_s16(int16x8_t __p0) { uint8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqnegq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); return __ret; } #else __ai int8x16_t vqnegq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqnegq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vqnegq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqnegq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vqnegq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqneg_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); return __ret; } #else __ai int8x8_t vqneg_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqneg_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vqneg_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqneg_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) 
__builtin_neon_vqneg_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vqneg_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return 
__ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); return __ret; } #else __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; 
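  /* Added descriptive comment: vqrshlq_u64 shifts each unsigned 64-bit lane
   * of __p0 left by the signed shift count held in the matching lane of
   * __p1. Negative counts perform a rounding shift right, and left shifts
   * that overflow saturate to UINT64_MAX. Illustrative arithmetic with
   * hypothetical values: a lane of 3 with a count of -1 gives
   * (3 + 1) >> 1 = 2. */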
__ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = 
(int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return 
__ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #else #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #else #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #else #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #else #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int16x4_t) 
__builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #else #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #else #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #else #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #else #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #else #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 
16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t 
__p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlq_n_u64(__p0, __p1) 
__extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #else #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshl_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else 
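/* The vqshl_n / vqshlq_n forms defined here shift each lane left by an
 * immediate and saturate on overflow; they are macros rather than functions so
 * that the shift amount reaches the underlying __builtin_neon_vqshl_n_v /
 * __builtin_neon_vqshlq_n_v call as an integer constant expression.  A minimal
 * sketch, assuming `x` is an existing int16x4_t and the constant stays inside
 * the 0..15 range valid for 16-bit lanes:
 *
 *   int16x4_t y = vqshl_n_s16(x, 3);    // per-lane x << 3 with saturation
 */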
#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshl_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define vqshl_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vqshl_n_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vqshl_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define vqshl_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshl_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #else #define vqshl_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshl_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else #define vqshl_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vqshl_n_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vqshl_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else #define vqshl_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ int8x16_t __s0 = __p0; \ __ret = 
(uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ 
uint16x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #else #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #else #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #else #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #else #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t 
__ret; \ int64x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #else #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #else #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #else #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #else #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #else #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ int16x8_t __s0 = 
__p0; \ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = 
(int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 
2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #else __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #else __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai 
uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #else __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #else __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #else __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #else __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) 
__builtin_neon_vrecpeq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrecpeq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrecpeq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrecpe_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrecpe_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vrev16_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else __ai poly8x8_t vrev16_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t 
vrev16q_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); return __ret; } #else __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); return __ret; } #else __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vrev16q_s8(int8x16_t __p0) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); return __ret; } #else __ai int8x16_t vrev16q_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vrev16_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else __ai uint8x8_t vrev16_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vrev16_s8(int8x8_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else __ai int8x8_t vrev16_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vrev32_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else __ai poly8x8_t vrev32_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vrev32_p16(poly16x4_t __p0) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return __ret; } #else __ai poly16x4_t vrev32_p16(poly16x4_t __p0) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); return __ret; } #else __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); return __ret; } #else __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vrev32q_s8(int8x16_t __p0) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); return __ret; } #else __ai int8x16_t vrev32q_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vrev32q_s16(int16x8_t __p0) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else __ai int16x8_t vrev32q_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef 
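/*
 * Note on the paired #ifdef __LITTLE_ENDIAN__ / #else definitions used
 * throughout this header: the __builtin_neon_* builtins and
 * __builtin_shufflevector effectively operate on lanes in little-endian
 * order, so each big-endian (#else) variant reverses the lanes of its
 * operands, performs the same operation, and reverses the lanes of the
 * result again, e.g. for a two-lane vector:
 *
 *   __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);   // reverse lanes
 *   __ret  = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
 *   __ret  = __builtin_shufflevector(__ret, __ret, 1, 0); // restore order
 *
 * so that user code observes the same lane numbering on either endianness.
 */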
__LITTLE_ENDIAN__ __ai uint8x8_t vrev32_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else __ai uint8x8_t vrev32_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vrev32_u16(uint16x4_t __p0) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return __ret; } #else __ai uint16x4_t vrev32_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vrev32_s8(int8x8_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else __ai int8x8_t vrev32_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vrev32_s16(int16x4_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return __ret; } #else __ai int16x4_t vrev32_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vrev64_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #else __ai poly8x8_t vrev64_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vrev64_p16(poly16x4_t __p0) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); return __ret; } #else __ai poly16x4_t vrev64_p16(poly16x4_t __p0) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); return __ret; } #else __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { poly16x8_t __ret; __ret = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); return __ret; } #else __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return __ret; } #else __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vrev64q_s8(int8x16_t __p0) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); return __ret; } #else __ai int8x16_t vrev64q_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrev64q_f32(float32x4_t __p0) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return __ret; } #else __ai float32x4_t vrev64q_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vrev64q_s32(int32x4_t __p0) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return __ret; } #else __ai int32x4_t vrev64q_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t 
vrev64q_s16(int16x8_t __p0) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else __ai int16x8_t vrev64q_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vrev64_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #else __ai uint8x8_t vrev64_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vrev64_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0); return __ret; } #else __ai uint32x2_t vrev64_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vrev64_u16(uint16x4_t __p0) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); return __ret; } #else __ai uint16x4_t vrev64_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vrev64_s8(int8x8_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #else __ai int8x8_t vrev64_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrev64_f32(float32x2_t __p0) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0); return __ret; } #else __ai float32x2_t vrev64_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vrev64_s32(int32x2_t __p0) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0); return __ret; } #else __ai int32x2_t vrev64_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vrev64_s16(int16x4_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); return __ret; } #else __ai int16x4_t vrev64_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = 
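/*
 * vrev16 / vrev32 / vrev64 (defined above): reverse the order of the
 * elements inside every 16-, 32- or 64-bit group of the vector, using only
 * __builtin_shufflevector (no separate builtin is needed). One common use
 * is byte-swapping packed 32-bit words; a sketch, assuming the usual
 * vld1q_u8 / vst1q_u8 loads and stores defined elsewhere in this header:
 *
 *   uint8x16_t w = vld1q_u8(src);   // four big-endian 32-bit words as bytes
 *   w = vrev32q_u8(w);              // swap the four bytes inside each word
 *   vst1q_u8(dst, w);
 */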
(int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 
0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = 
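/*
 * vrhadd / vrhaddq (defined above): rounding halving add. Each result lane
 * is (a + b + 1) >> 1, evaluated without losing the carry, which makes it a
 * convenient per-lane average. Illustrative sketch (operand names are not
 * part of this header):
 *
 *   uint8x16_t avg = vrhaddq_u8(pix_a, pix_b);   // rounded average of bytes
 */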
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret 
= (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define vrshrq_n_u32(__p0, __p1) 
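/*
 * vrshl / vrshlq (defined above): rounding shift by a per-lane, signed
 * shift count taken from the second operand; positive counts shift left,
 * negative counts perform a rounding shift right. Sketch (vdupq_n_s32 is
 * defined elsewhere in this header; x is illustrative):
 *
 *   int32x4_t y = vrshlq_s32(x, vdupq_n_s32(-3));   // rounding >> 3 per lane
 */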
__extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #else #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) 
__builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshr_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else #define vrshr_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshr_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define vrshr_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vrshr_n_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vrshr_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define vrshr_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshr_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #else #define vrshr_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshr_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else #define vrshr_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vrshr_n_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vrshr_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else #define vrshr_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ 
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #else #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #else #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #else #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #else #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #else #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #else #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrsqrte_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrsqrte_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) 
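/*
 * vrshr_n / vrshrq_n and vrshrn_n (defined above): rounding shift right by
 * an immediate; the rounding constant 1 << (n-1) is added before shifting,
 * and the vrshrn_n forms additionally narrow each lane to half its width.
 * They are macros rather than inline functions so that the shift amount
 * reaches the builtin as a compile-time constant. Sketch (acc is an
 * illustrative int32x4_t accumulator):
 *
 *   int16x4_t q15 = vrshrn_n_s32(acc, 15);   // rounded narrowing from Q30 to Q15
 */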
__builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ 
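/*
 * vrsqrte / vrsqrts (defined above): VRSQRTE gives a low-precision per-lane
 * estimate of 1/sqrt(x), and VRSQRTS computes (3.0 - a*b) / 2.0, the
 * matching Newton-Raphson correction factor. A minimal refinement sketch
 * (variable names are illustrative only):
 *
 *   float32x4_t e = vrsqrteq_f32(d);                      // rough 1/sqrt(d)
 *   e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(d, e), e));   // refinement step
 *   e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(d, e), e));   // second step
 */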
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ 
__ret; \ }) #else #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ uint64x1_t __s1 = __p1; \ __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ int64x1_t __s1 = __p1; \ __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #else __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #else __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #else __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t 
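/*
 * vrsra_n / vrsraq_n (defined above): rounding shift right by an immediate
 * and accumulate, i.e. ret = p0 + ((p1 + (1 << (n-1))) >> n) per lane.
 * Sketch (acc and x are illustrative uint16x8_t values):
 *
 *   acc = vrsraq_n_u16(acc, x, 4);   // acc += (x + 8) >> 4, per lane
 */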
__ret; __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #else __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #else __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #else __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ poly16x4_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ 
uint32x4_t __ret; \ uint32_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4_t __ret; \ float32_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4_t __ret; 
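/* The vset_lane_* and vsetq_lane_* groups return a copy of a vector with one
 * lane replaced by a scalar; the lane index must be a constant expression in
 * range for the vector length.  Minimal usage sketch; the helper name and the
 * lane index 2 are illustrative only: */
__ai uint32x4_t __example_vsetq_lane_u32(uint32_t __value, uint32x4_t __vec) {
  /* Replace lane 2 of __vec with __value; all other lanes are unchanged. */
  return vsetq_lane_u32(__value, __vec, 2);
}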
\ float32_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4_t __ret; \ float32_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 
3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ __ret; \ }) #endif #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ uint64_t __s0 = __p0; \ uint64x1_t __s1 = __p1; \ __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __ret; \ float32_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __ret; \ float32_t __s0 = __p0; \ 
float32x2_t __s1 = __p1; \ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __ret; \ float32_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ __ret; \ }) #endif #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ int64_t __s0 = __p0; \ int64x1_t __s1 = __p1; \ __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t 
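/* The vshl_* and vshlq_* groups shift each lane of the first operand by the
 * signed count held in the corresponding lane of the second operand; negative
 * counts shift right.  Minimal usage sketch; the helper name is illustrative
 * only: */
__ai uint32x4_t __example_vshlq_u32(uint32x4_t __a, int32x4_t __counts) {
  /* Per lane: __a[i] << __counts[i] for positive counts, __a[i] >> -__counts[i] otherwise. */
  return vshlq_u32(__a, __counts);
}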
vshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); return __ret; } #endif __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vshlq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else #define vshlq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshlq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define vshlq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshlq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define vshlq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshlq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define vshlq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshlq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #else #define vshlq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) 
__builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshlq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else #define vshlq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshlq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else #define vshlq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshlq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else #define vshlq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshl_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else #define vshl_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshl_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define vshl_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vshl_n_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vshl_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define vshl_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); 
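/* The vshlq_n_* and vshl_n_* groups shift every lane left by the same
 * immediate, which must be a constant expression smaller than the element
 * width.  Minimal usage sketch; the helper name and the shift amount of 4 are
 * illustrative only: */
__ai int32x4_t __example_vshlq_n_s32(int32x4_t __a) {
  /* Multiply each lane by 16 by shifting left four bits. */
  return vshlq_n_s32(__a, 4);
}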
\ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshl_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #else #define vshl_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshl_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else #define vshl_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vshl_n_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vshl_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else #define vshl_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_n_u8(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ __ret; \ }) #else #define vshll_n_u8(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_n_u32(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ __ret; \ }) #else #define vshll_n_u32(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_n_u16(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ 
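/* The vshll_n_* group widens each lane to twice its width and then shifts it
 * left by an immediate, so no significant bits are lost.  Minimal usage
 * sketch; the helper name and the shift amount of 4 are illustrative only: */
__ai uint16x8_t __example_vshll_n_u8(uint8x8_t __a) {
  /* Zero-extend each byte to 16 bits and scale it by 16 (shift left by four). */
  return vshll_n_u8(__a, 4);
}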
__ret; \ }) #else #define vshll_n_u16(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_n_s8(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ __ret; \ }) #else #define vshll_n_s8(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_n_s32(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ __ret; \ }) #else #define vshll_n_s32(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_n_s16(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ __ret; \ }) #else #define vshll_n_s16(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else #define vshrq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32x4_t) 
__builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else #define vshrq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define vshrq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define vshrq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #else #define vshrq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else #define vshrq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else #define vshrq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else #define vshrq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = 
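/* The vshrq_n_* and vshr_n_* groups shift every lane right by the same
 * immediate: a logical shift for unsigned element types and an arithmetic
 * shift for signed ones.  Minimal usage sketch; the helper name and the shift
 * amount of 2 are illustrative only: */
__ai uint16x8_t __example_vshrq_n_u16(uint16x8_t __a) {
  /* Divide each unsigned 16-bit lane by 4, truncating toward zero. */
  return vshrq_n_u16(__a, 2);
}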
__builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshr_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else #define vshr_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshr_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else #define vshr_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vshr_n_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vshr_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define vshr_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshr_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #else #define vshr_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshr_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else #define vshr_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vshr_n_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vshr_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else #define vshr_n_s16(__p0, __p1) __extension__ 
({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #else #define vshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #else #define vshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #else #define vshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #else #define vshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #else #define vshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; 
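/* The vshrn_n_* group shifts each lane right by an immediate and narrows it to
 * half the element width, keeping the low half of the shifted value.  Minimal
 * usage sketch; the helper name and the Q16.16 interpretation are illustrative
 * only: */
__ai uint16x4_t __example_vshrn_n_u32(uint32x4_t __a) {
  /* Keep the integer part of a Q16.16 fixed-point lane by dropping 16 fraction bits. */
  return vshrn_n_u32(__a, 16);
}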
\ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #else #define vshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ __ret; \ }) #else #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ __ret; \ }) #else #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ __ret; \ }) #else #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_p16(__p0, __p1, __p2) __extension__ 
({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ __ret; \ }) #else #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); 
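/* The vsli_n_* and vsliq_n_* groups shift the second operand left by an
 * immediate and insert it into the first operand, leaving the destination bits
 * below the shift amount unchanged.  Minimal usage sketch; the helper name and
 * the shift amount of 8 are illustrative only: */
__ai uint32x4_t __example_vsliq_n_u32(uint32x4_t __dst, uint32x4_t __src) {
  /* Each lane becomes (__src[i] << 8) | (__dst[i] & 0xff). */
  return vsliq_n_u32(__dst, __src, 8);
}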
\ __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ __ret; \ }) #else #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 
= __p0; \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ uint64x1_t __s1 = __p1; \ __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ 
int64x1_t __s0 = __p0; \ int64x1_t __s1 = __p1; \ __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ 
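/* Usage sketch (illustrative only, not part of the generated header): the
 * vsli_n_* and vsliq_n_* macros above shift each lane of the second operand
 * left by the immediate and insert the result into the first operand, leaving
 * the low bits of the first operand unchanged. For example, packing one
 * 4-bit field above another:
 *
 *   uint8x8_t lo = vdup_n_u8(0x0F);
 *   uint8x8_t hi = vdup_n_u8(0x03);
 *   uint8x8_t packed = vsli_n_u8(lo, hi, 4);   // each lane is 0x3F
 */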
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ __ret = 
(uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ __ret; \ }) #else #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ uint64x1_t __s1 = __p1; \ __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) 
__builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ int64x1_t __s1 = __p1; \ __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ __ret; \ }) #else #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ __ret; \ }) #else #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ __ret; \ }) #else #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ __ret 
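/* Usage sketch (illustrative only, not part of the generated header): the
 * vsra_n_* and vsraq_n_* macros above shift each lane of the second operand
 * right by the immediate and add the result to the first operand, which is
 * handy for accumulating a scaled-down term:
 *
 *   int16x8_t acc = vdupq_n_s16(100);
 *   int16x8_t err = vdupq_n_s16(64);
 *   acc = vsraq_n_s16(acc, err, 3);   // each lane is 100 + (64 >> 3) == 108
 */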
= (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ __ret; \ }) #else #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, __p2, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ __ret; \ }) #else #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ uint64x1_t __s1 = __p1; \ __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ int64x1_t __s1 = __p1; \ __ret = 
(int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_p8(__p0, __p1) __extension__ ({ \ poly8x8_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \ }) #else #define vst1_p8(__p0, __p1) __extension__ ({ \ poly8x8_t __s1 = __p1; \ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_p16(__p0, __p1) __extension__ ({ \ poly16x4_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \ }) #else #define vst1_p16(__p0, __p1) __extension__ ({ \ poly16x4_t __s1 = __p1; \ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_p8(__p0, __p1) __extension__ ({ \ poly8x16_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \ }) #else #define vst1q_p8(__p0, __p1) __extension__ ({ \ poly8x16_t __s1 = __p1; \ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_p16(__p0, __p1) __extension__ ({ \ poly16x8_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \ }) #else #define vst1q_p16(__p0, __p1) __extension__ ({ \ poly16x8_t __s1 = __p1; \ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \ }) #else #define vst1q_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \ }) #else #define vst1q_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \ }) #else #define vst1q_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \ }) #endif #ifdef 
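/* Usage sketch (illustrative only, not part of the generated header): the
 * vsri_n_* and vsriq_n_* macros above shift each lane of the second operand
 * right by the immediate and insert it into the first operand, leaving the
 * top bits of the first operand unchanged, e.g. merging two bit-fields of a
 * 16-bit lane:
 *
 *   uint16x4_t r = vdup_n_u16(0xF800);     // field already in the top 5 bits
 *   uint16x4_t g = vdup_n_u16(0xFC00);     // field in the top 6 bits
 *   uint16x4_t rg = vsri_n_u16(r, g, 5);   // each lane is 0xFFE0
 */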
__LITTLE_ENDIAN__ #define vst1q_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \ }) #else #define vst1q_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s8(__p0, __p1) __extension__ ({ \ int8x16_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \ }) #else #define vst1q_s8(__p0, __p1) __extension__ ({ \ int8x16_t __s1 = __p1; \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f32(__p0, __p1) __extension__ ({ \ float32x4_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \ }) #else #define vst1q_f32(__p0, __p1) __extension__ ({ \ float32x4_t __s1 = __p1; \ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s32(__p0, __p1) __extension__ ({ \ int32x4_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \ }) #else #define vst1q_s32(__p0, __p1) __extension__ ({ \ int32x4_t __s1 = __p1; \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s64(__p0, __p1) __extension__ ({ \ int64x2_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \ }) #else #define vst1q_s64(__p0, __p1) __extension__ ({ \ int64x2_t __s1 = __p1; \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s16(__p0, __p1) __extension__ ({ \ int16x8_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \ }) #else #define vst1q_s16(__p0, __p1) __extension__ ({ \ int16x8_t __s1 = __p1; \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \ }) #else #define vst1_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \ }) #else #define vst1_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \ }) #endif #define vst1_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \ }) #else #define vst1_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_v(__p0, 
(int8x8_t)__rev1, 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_s8(__p0, __p1) __extension__ ({ \ int8x8_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \ }) #else #define vst1_s8(__p0, __p1) __extension__ ({ \ int8x8_t __s1 = __p1; \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_f32(__p0, __p1) __extension__ ({ \ float32x2_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \ }) #else #define vst1_f32(__p0, __p1) __extension__ ({ \ float32x2_t __s1 = __p1; \ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_s32(__p0, __p1) __extension__ ({ \ int32x2_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \ }) #else #define vst1_s32(__p0, __p1) __extension__ ({ \ int32x2_t __s1 = __p1; \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \ }) #endif #define vst1_s64(__p0, __p1) __extension__ ({ \ int64x1_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_s16(__p0, __p1) __extension__ ({ \ int16x4_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \ }) #else #define vst1_s16(__p0, __p1) __extension__ ({ \ int16x4_t __s1 = __p1; \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ }) #else #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __s1 = __p1; \ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ }) #else #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __s1 = __p1; \ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ }) #else #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __s1 = __p1; \ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ }) #else #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __s1 = __p1; \ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 
48); \ }) #else #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ }) #else #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __s1 = __p1; \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ }) #else #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ }) #else #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ }) #else #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __s1 = __p1; \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ }) #else #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4_t __s1 = __p1; \ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ }) #else #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __s1 = __p1; \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ }) #else #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __s1 = __p1; \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ }) #else #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ 
int16x8_t __s1 = __p1; \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ }) #else #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ }) #else #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ }) #endif #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ }) #else #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ }) #else #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __s1 = __p1; \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ }) #else #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2_t __s1 = __p1; \ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ }) #else #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __s1 = __p1; \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ }) #endif #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ }) #else #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __s1 = __p1; \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_p8_x2(__p0, __p1) __extension__ ({ \ poly8x8x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, 
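/* Usage sketch (illustrative only, not part of the generated header): the
 * vst1_*, vst1q_*, vst1_lane_* and vst1q_lane_* macros above store a whole
 * vector or a single element to memory:
 *
 *   uint8_t buf[16];
 *   uint8x16_t v = vdupq_n_u8(7);
 *   vst1q_u8(buf, v);           // writes all 16 bytes of v to buf
 *   vst1q_lane_u8(buf, v, 0);   // rewrites only buf[0] with lane 0 of v
 */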
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ }) #else #define vst1_p8_x2(__p0, __p1) __extension__ ({ \ poly8x8x2_t __s1 = __p1; \ poly8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_p16_x2(__p0, __p1) __extension__ ({ \ poly16x4x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ }) #else #define vst1_p16_x2(__p0, __p1) __extension__ ({ \ poly16x4x2_t __s1 = __p1; \ poly16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ poly8x16x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ }) #else #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ poly8x16x2_t __s1 = __p1; \ poly8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ poly16x8x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ }) #else #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ poly16x8x2_t __s1 = __p1; \ poly16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ uint8x16x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ }) #else #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ uint8x16x2_t __s1 = __p1; \ uint8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ uint32x4x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ }) #else #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ uint32x4x2_t __s1 = __p1; \ uint32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ uint64x2x2_t __s1 = 
__p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ }) #else #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ uint64x2x2_t __s1 = __p1; \ uint64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ uint16x8x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ }) #else #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ uint16x8x2_t __s1 = __p1; \ uint16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ int8x16x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ }) #else #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ int8x16x2_t __s1 = __p1; \ int8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ float32x4x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ }) #else #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ float32x4x2_t __s1 = __p1; \ float32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ int32x4x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ }) #else #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ int32x4x2_t __s1 = __p1; \ int32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ int64x2x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ }) #else #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ int64x2x2_t __s1 = __p1; \ int64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ int16x8x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], 33); \ }) #else #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ int16x8x2_t __s1 = __p1; \ int16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_u8_x2(__p0, __p1) __extension__ ({ \ uint8x8x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ }) #else #define vst1_u8_x2(__p0, __p1) __extension__ ({ \ uint8x8x2_t __s1 = __p1; \ uint8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_u32_x2(__p0, __p1) __extension__ ({ \ uint32x2x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ }) #else #define vst1_u32_x2(__p0, __p1) __extension__ ({ \ uint32x2x2_t __s1 = __p1; \ uint32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ }) #endif #define vst1_u64_x2(__p0, __p1) __extension__ ({ \ uint64x1x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_u16_x2(__p0, __p1) __extension__ ({ \ uint16x4x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ }) #else #define vst1_u16_x2(__p0, __p1) __extension__ ({ \ uint16x4x2_t __s1 = __p1; \ uint16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_s8_x2(__p0, __p1) __extension__ ({ \ int8x8x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ }) #else #define vst1_s8_x2(__p0, __p1) __extension__ ({ \ int8x8x2_t __s1 = __p1; \ int8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_f32_x2(__p0, __p1) __extension__ ({ \ float32x2x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ }) #else #define vst1_f32_x2(__p0, __p1) __extension__ ({ \ float32x2x2_t __s1 = __p1; \ float32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_s32_x2(__p0, __p1) __extension__ ({ \ int32x2x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ }) #else #define vst1_s32_x2(__p0, __p1) __extension__ ({ \ int32x2x2_t __s1 = __p1; \ int32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ }) #endif #define vst1_s64_x2(__p0, __p1) __extension__ ({ \ int64x1x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_s16_x2(__p0, __p1) __extension__ ({ \ int16x4x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ }) #else #define vst1_s16_x2(__p0, __p1) __extension__ ({ \ int16x4x2_t __s1 = __p1; \ int16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_p8_x3(__p0, __p1) __extension__ ({ \ poly8x8x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ }) #else #define vst1_p8_x3(__p0, __p1) __extension__ ({ \ poly8x8x3_t __s1 = __p1; \ poly8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_p16_x3(__p0, __p1) __extension__ ({ \ poly16x4x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ }) #else #define vst1_p16_x3(__p0, __p1) __extension__ ({ \ poly16x4x3_t __s1 = __p1; \ poly16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ poly8x16x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ }) #else #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ poly8x16x3_t __s1 = __p1; \ poly8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ poly16x8x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 
(int8x16_t)__s1.val[2], 37); \ }) #else #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ poly16x8x3_t __s1 = __p1; \ poly16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ uint8x16x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ }) #else #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ uint8x16x3_t __s1 = __p1; \ uint8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ uint32x4x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ }) #else #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ uint32x4x3_t __s1 = __p1; \ uint32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ uint64x2x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ }) #else #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ uint64x2x3_t __s1 = __p1; \ uint64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ uint16x8x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ }) #else #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ uint16x8x3_t __s1 = __p1; \ uint16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s8_x3(__p0, __p1) __extension__ 
({ \ int8x16x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ }) #else #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ int8x16x3_t __s1 = __p1; \ int8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ float32x4x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ }) #else #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ float32x4x3_t __s1 = __p1; \ float32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ int32x4x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ }) #else #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ int32x4x3_t __s1 = __p1; \ int32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ int64x2x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ }) #else #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ int64x2x3_t __s1 = __p1; \ int64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ int16x8x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ }) #else #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ int16x8x3_t __s1 = __p1; \ int16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ }) #endif 
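/* The vst1[q]_<type>_x2, _x3 and _x4 macros store two, three or four whole
 * vectors to consecutive memory through the corresponding
 * __builtin_neon_vst1[q]_x2/x3/x4_v builtins.  On big-endian targets each
 * source vector is first lane-reversed with __builtin_shufflevector,
 * compensating for the reversed lane numbering used by the underlying
 * builtins, and the trailing integer literal selects the element type for
 * the backend.
 *
 * Illustrative usage sketch (buffer and variable names are placeholders):
 *
 *   uint8_t dst[24];
 *   uint8x8x3_t v;
 *   v.val[0] = vdup_n_u8(1);
 *   v.val[1] = vdup_n_u8(2);
 *   v.val[2] = vdup_n_u8(3);
 *   vst1_u8_x3(dst, v);   // writes val[0], val[1], val[2] back to back
 */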
#ifdef __LITTLE_ENDIAN__ #define vst1_u8_x3(__p0, __p1) __extension__ ({ \ uint8x8x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ }) #else #define vst1_u8_x3(__p0, __p1) __extension__ ({ \ uint8x8x3_t __s1 = __p1; \ uint8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_u32_x3(__p0, __p1) __extension__ ({ \ uint32x2x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ }) #else #define vst1_u32_x3(__p0, __p1) __extension__ ({ \ uint32x2x3_t __s1 = __p1; \ uint32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ }) #endif #define vst1_u64_x3(__p0, __p1) __extension__ ({ \ uint64x1x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_u16_x3(__p0, __p1) __extension__ ({ \ uint16x4x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ }) #else #define vst1_u16_x3(__p0, __p1) __extension__ ({ \ uint16x4x3_t __s1 = __p1; \ uint16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_s8_x3(__p0, __p1) __extension__ ({ \ int8x8x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ }) #else #define vst1_s8_x3(__p0, __p1) __extension__ ({ \ int8x8x3_t __s1 = __p1; \ int8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_f32_x3(__p0, __p1) __extension__ ({ \ float32x2x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ }) #else #define vst1_f32_x3(__p0, __p1) __extension__ ({ \ float32x2x3_t __s1 = __p1; \ float32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ 
__builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_s32_x3(__p0, __p1) __extension__ ({ \ int32x2x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ }) #else #define vst1_s32_x3(__p0, __p1) __extension__ ({ \ int32x2x3_t __s1 = __p1; \ int32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ }) #endif #define vst1_s64_x3(__p0, __p1) __extension__ ({ \ int64x1x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_s16_x3(__p0, __p1) __extension__ ({ \ int16x4x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ }) #else #define vst1_s16_x3(__p0, __p1) __extension__ ({ \ int16x4x3_t __s1 = __p1; \ int16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_p8_x4(__p0, __p1) __extension__ ({ \ poly8x8x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ }) #else #define vst1_p8_x4(__p0, __p1) __extension__ ({ \ poly8x8x4_t __s1 = __p1; \ poly8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_p16_x4(__p0, __p1) __extension__ ({ \ poly16x4x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ }) #else #define vst1_p16_x4(__p0, __p1) __extension__ ({ \ poly16x4x4_t __s1 = __p1; \ poly16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ poly8x16x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 
(int8x16_t)__s1.val[3], 36); \ }) #else #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ poly8x16x4_t __s1 = __p1; \ poly8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ poly16x8x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ }) #else #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ poly16x8x4_t __s1 = __p1; \ poly16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ uint8x16x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ }) #else #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ uint8x16x4_t __s1 = __p1; \ uint8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ uint32x4x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ }) #else #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ uint32x4x4_t __s1 = __p1; \ uint32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u64_x4(__p0, __p1) 
__extension__ ({ \ uint64x2x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ }) #else #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ uint64x2x4_t __s1 = __p1; \ uint64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ uint16x8x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ }) #else #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ uint16x8x4_t __s1 = __p1; \ uint16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ int8x16x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ }) #else #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ int8x16x4_t __s1 = __p1; \ int8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ float32x4x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ }) #else #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ float32x4x4_t __s1 = __p1; \ float32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ int32x4x4_t __s1 = __p1; \ 
__builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ }) #else #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ int32x4x4_t __s1 = __p1; \ int32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ int64x2x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ }) #else #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ int64x2x4_t __s1 = __p1; \ int64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ int16x8x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ }) #else #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ int16x8x4_t __s1 = __p1; \ int16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_u8_x4(__p0, __p1) __extension__ ({ \ uint8x8x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ }) #else #define vst1_u8_x4(__p0, __p1) __extension__ ({ \ uint8x8x4_t __s1 = __p1; \ uint8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_u32_x4(__p0, __p1) __extension__ ({ \ uint32x2x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ }) #else #define vst1_u32_x4(__p0, __p1) __extension__ ({ \ 
uint32x2x4_t __s1 = __p1; \ uint32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ }) #endif #define vst1_u64_x4(__p0, __p1) __extension__ ({ \ uint64x1x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_u16_x4(__p0, __p1) __extension__ ({ \ uint16x4x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ }) #else #define vst1_u16_x4(__p0, __p1) __extension__ ({ \ uint16x4x4_t __s1 = __p1; \ uint16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_s8_x4(__p0, __p1) __extension__ ({ \ int8x8x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ }) #else #define vst1_s8_x4(__p0, __p1) __extension__ ({ \ int8x8x4_t __s1 = __p1; \ int8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_f32_x4(__p0, __p1) __extension__ ({ \ float32x2x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ }) #else #define vst1_f32_x4(__p0, __p1) __extension__ ({ \ float32x2x4_t __s1 = __p1; \ float32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_s32_x4(__p0, __p1) __extension__ ({ \ int32x2x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ }) #else #define vst1_s32_x4(__p0, __p1) __extension__ ({ \ int32x2x4_t __s1 = __p1; \ int32x2x4_t __rev1; \ __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ }) #endif #define vst1_s64_x4(__p0, __p1) __extension__ ({ \ int64x1x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1_s16_x4(__p0, __p1) __extension__ ({ \ int16x4x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ }) #else #define vst1_s16_x4(__p0, __p1) __extension__ ({ \ int16x4x4_t __s1 = __p1; \ int16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_p8(__p0, __p1) __extension__ ({ \ poly8x8x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ }) #else #define vst2_p8(__p0, __p1) __extension__ ({ \ poly8x8x2_t __s1 = __p1; \ poly8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_p16(__p0, __p1) __extension__ ({ \ poly16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ }) #else #define vst2_p16(__p0, __p1) __extension__ ({ \ poly16x4x2_t __s1 = __p1; \ poly16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_p8(__p0, __p1) __extension__ ({ \ poly8x16x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ }) #else #define vst2q_p8(__p0, __p1) __extension__ ({ \ poly8x16x2_t __s1 = __p1; \ poly8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_p16(__p0, __p1) __extension__ ({ \ poly16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ }) #else #define vst2q_p16(__p0, __p1) __extension__ ({ \ poly16x8x2_t __s1 = __p1; \ poly16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_u8(__p0, __p1) __extension__ ({ \ uint8x16x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ }) #else #define vst2q_u8(__p0, __p1) __extension__ ({ \ uint8x16x2_t __s1 = __p1; \ uint8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_u32(__p0, __p1) __extension__ ({ \ uint32x4x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ }) #else #define vst2q_u32(__p0, __p1) __extension__ ({ \ uint32x4x2_t __s1 = __p1; \ uint32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_u16(__p0, __p1) __extension__ ({ \ uint16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ }) #else #define vst2q_u16(__p0, __p1) __extension__ ({ \ uint16x8x2_t __s1 = __p1; \ uint16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_s8(__p0, __p1) __extension__ ({ \ int8x16x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ }) #else #define vst2q_s8(__p0, __p1) __extension__ ({ \ int8x16x2_t __s1 = __p1; \ int8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_f32(__p0, __p1) __extension__ ({ \ float32x4x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ }) #else #define vst2q_f32(__p0, __p1) __extension__ ({ \ float32x4x2_t __s1 = __p1; \ float32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_s32(__p0, __p1) __extension__ ({ \ int32x4x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ }) #else #define vst2q_s32(__p0, __p1) __extension__ ({ \ int32x4x2_t __s1 = __p1; \ int32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 
2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_s16(__p0, __p1) __extension__ ({ \ int16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ }) #else #define vst2q_s16(__p0, __p1) __extension__ ({ \ int16x8x2_t __s1 = __p1; \ int16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_u8(__p0, __p1) __extension__ ({ \ uint8x8x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ }) #else #define vst2_u8(__p0, __p1) __extension__ ({ \ uint8x8x2_t __s1 = __p1; \ uint8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_u32(__p0, __p1) __extension__ ({ \ uint32x2x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ }) #else #define vst2_u32(__p0, __p1) __extension__ ({ \ uint32x2x2_t __s1 = __p1; \ uint32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ }) #endif #define vst2_u64(__p0, __p1) __extension__ ({ \ uint64x1x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ }) #ifdef __LITTLE_ENDIAN__ #define vst2_u16(__p0, __p1) __extension__ ({ \ uint16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ }) #else #define vst2_u16(__p0, __p1) __extension__ ({ \ uint16x4x2_t __s1 = __p1; \ uint16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_s8(__p0, __p1) __extension__ ({ \ int8x8x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ }) #else #define vst2_s8(__p0, __p1) __extension__ ({ \ int8x8x2_t __s1 = __p1; \ int8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_f32(__p0, __p1) __extension__ ({ \ float32x2x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ }) #else #define vst2_f32(__p0, __p1) __extension__ ({ \ float32x2x2_t __s1 = __p1; \ float32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_s32(__p0, __p1) __extension__ ({ \ int32x2x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ }) #else #define vst2_s32(__p0, __p1) __extension__ ({ \ int32x2x2_t __s1 = __p1; \ int32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ }) #endif #define vst2_s64(__p0, __p1) __extension__ ({ \ int64x1x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ }) #ifdef __LITTLE_ENDIAN__ #define vst2_s16(__p0, __p1) __extension__ ({ \ int16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ }) #else #define vst2_s16(__p0, __p1) __extension__ ({ \ int16x4x2_t __s1 = __p1; \ int16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ }) #else #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x2_t __s1 = __p1; \ poly8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ }) #else #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x2_t __s1 = __p1; \ poly16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ }) #else #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x2_t __s1 = __p1; \ poly16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ }) #else #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x2_t __s1 = __p1; \ uint32x4x2_t __rev1; \ __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ }) #else #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x2_t __s1 = __p1; \ uint16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ }) #else #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x2_t __s1 = __p1; \ float32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ }) #else #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x2_t __s1 = __p1; \ int32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ }) #else #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x2_t __s1 = __p1; \ int16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ }) #else #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x2_t __s1 = __p1; \ uint8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ }) #else #define vst2_lane_u32(__p0, __p1, 
__p2) __extension__ ({ \ uint32x2x2_t __s1 = __p1; \ uint32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ }) #else #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x2_t __s1 = __p1; \ uint16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ }) #else #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x2_t __s1 = __p1; \ int8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ }) #else #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x2_t __s1 = __p1; \ float32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ }) #else #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x2_t __s1 = __p1; \ int32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ }) #else #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x2_t __s1 = __p1; \ int16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_p8(__p0, __p1) __extension__ ({ \ poly8x8x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ }) #else #define vst3_p8(__p0, __p1) __extension__ ({ \ poly8x8x3_t __s1 = __p1; \ 
poly8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_p16(__p0, __p1) __extension__ ({ \ poly16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ }) #else #define vst3_p16(__p0, __p1) __extension__ ({ \ poly16x4x3_t __s1 = __p1; \ poly16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_p8(__p0, __p1) __extension__ ({ \ poly8x16x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ }) #else #define vst3q_p8(__p0, __p1) __extension__ ({ \ poly8x16x3_t __s1 = __p1; \ poly8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_p16(__p0, __p1) __extension__ ({ \ poly16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ }) #else #define vst3q_p16(__p0, __p1) __extension__ ({ \ poly16x8x3_t __s1 = __p1; \ poly16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_u8(__p0, __p1) __extension__ ({ \ uint8x16x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ }) #else #define vst3q_u8(__p0, __p1) __extension__ ({ \ uint8x16x3_t __s1 = __p1; \ uint8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_u32(__p0, __p1) __extension__ ({ \ uint32x4x3_t __s1 = __p1; \ 
__builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ }) #else #define vst3q_u32(__p0, __p1) __extension__ ({ \ uint32x4x3_t __s1 = __p1; \ uint32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_u16(__p0, __p1) __extension__ ({ \ uint16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ }) #else #define vst3q_u16(__p0, __p1) __extension__ ({ \ uint16x8x3_t __s1 = __p1; \ uint16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_s8(__p0, __p1) __extension__ ({ \ int8x16x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ }) #else #define vst3q_s8(__p0, __p1) __extension__ ({ \ int8x16x3_t __s1 = __p1; \ int8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_f32(__p0, __p1) __extension__ ({ \ float32x4x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ }) #else #define vst3q_f32(__p0, __p1) __extension__ ({ \ float32x4x3_t __s1 = __p1; \ float32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_s32(__p0, __p1) __extension__ ({ \ int32x4x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ }) #else #define vst3q_s32(__p0, __p1) __extension__ ({ \ int32x4x3_t __s1 = __p1; \ int32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_s16(__p0, __p1) __extension__ 
({ \ int16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ }) #else #define vst3q_s16(__p0, __p1) __extension__ ({ \ int16x8x3_t __s1 = __p1; \ int16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_u8(__p0, __p1) __extension__ ({ \ uint8x8x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ }) #else #define vst3_u8(__p0, __p1) __extension__ ({ \ uint8x8x3_t __s1 = __p1; \ uint8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_u32(__p0, __p1) __extension__ ({ \ uint32x2x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ }) #else #define vst3_u32(__p0, __p1) __extension__ ({ \ uint32x2x3_t __s1 = __p1; \ uint32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ }) #endif #define vst3_u64(__p0, __p1) __extension__ ({ \ uint64x1x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ }) #ifdef __LITTLE_ENDIAN__ #define vst3_u16(__p0, __p1) __extension__ ({ \ uint16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ }) #else #define vst3_u16(__p0, __p1) __extension__ ({ \ uint16x4x3_t __s1 = __p1; \ uint16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_s8(__p0, __p1) __extension__ ({ \ int8x8x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ }) #else #define vst3_s8(__p0, __p1) __extension__ ({ \ int8x8x3_t __s1 = __p1; \ int8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 
(int8x8_t)__rev1.val[2], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_f32(__p0, __p1) __extension__ ({ \ float32x2x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ }) #else #define vst3_f32(__p0, __p1) __extension__ ({ \ float32x2x3_t __s1 = __p1; \ float32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_s32(__p0, __p1) __extension__ ({ \ int32x2x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ }) #else #define vst3_s32(__p0, __p1) __extension__ ({ \ int32x2x3_t __s1 = __p1; \ int32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ }) #endif #define vst3_s64(__p0, __p1) __extension__ ({ \ int64x1x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ }) #ifdef __LITTLE_ENDIAN__ #define vst3_s16(__p0, __p1) __extension__ ({ \ int16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ }) #else #define vst3_s16(__p0, __p1) __extension__ ({ \ int16x4x3_t __s1 = __p1; \ int16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ }) #else #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x3_t __s1 = __p1; \ poly8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ }) #else #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x3_t __s1 = __p1; \ poly16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ 
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ }) #else #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x3_t __s1 = __p1; \ poly16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ }) #else #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x3_t __s1 = __p1; \ uint32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ }) #else #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x3_t __s1 = __p1; \ uint16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ }) #else #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x3_t __s1 = __p1; \ float32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ }) #else #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x3_t __s1 = __p1; \ int32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ 
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ }) #else #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x3_t __s1 = __p1; \ int16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ }) #else #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x3_t __s1 = __p1; \ uint8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ }) #else #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x3_t __s1 = __p1; \ uint32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ }) #else #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x3_t __s1 = __p1; \ uint16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ }) #else #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x3_t __s1 = __p1; \ 
int8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ }) #else #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x3_t __s1 = __p1; \ float32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ }) #else #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x3_t __s1 = __p1; \ int32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ }) #else #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x3_t __s1 = __p1; \ int16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_p8(__p0, __p1) __extension__ ({ \ poly8x8x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ }) #else #define vst4_p8(__p0, __p1) __extension__ ({ \ poly8x8x4_t __s1 = __p1; \ poly8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_p16(__p0, __p1) __extension__ ({ \ poly16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ }) #else #define vst4_p16(__p0, __p1) __extension__ ({ \ poly16x4x4_t __s1 = __p1; \ poly16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_p8(__p0, __p1) __extension__ ({ \ poly8x16x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ }) #else #define vst4q_p8(__p0, __p1) __extension__ ({ \ poly8x16x4_t __s1 = __p1; \ poly8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_p16(__p0, __p1) __extension__ ({ \ poly16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ }) #else #define vst4q_p16(__p0, __p1) __extension__ ({ \ poly16x8x4_t __s1 = __p1; \ poly16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_u8(__p0, __p1) __extension__ ({ \ uint8x16x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ }) #else #define vst4q_u8(__p0, __p1) __extension__ ({ \ uint8x16x4_t __s1 = __p1; \ uint8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_u32(__p0, __p1) __extension__ ({ 
\ uint32x4x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ }) #else #define vst4q_u32(__p0, __p1) __extension__ ({ \ uint32x4x4_t __s1 = __p1; \ uint32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_u16(__p0, __p1) __extension__ ({ \ uint16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ }) #else #define vst4q_u16(__p0, __p1) __extension__ ({ \ uint16x8x4_t __s1 = __p1; \ uint16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_s8(__p0, __p1) __extension__ ({ \ int8x16x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ }) #else #define vst4q_s8(__p0, __p1) __extension__ ({ \ int8x16x4_t __s1 = __p1; \ int8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_f32(__p0, __p1) __extension__ ({ \ float32x4x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ }) #else #define vst4q_f32(__p0, __p1) __extension__ ({ \ float32x4x4_t __s1 = __p1; \ float32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_s32(__p0, __p1) __extension__ ({ \ int32x4x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ }) #else #define vst4q_s32(__p0, __p1) __extension__ ({ \ int32x4x4_t __s1 = __p1; \ int32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_s16(__p0, __p1) __extension__ ({ \ int16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ }) #else #define vst4q_s16(__p0, __p1) __extension__ ({ \ int16x8x4_t __s1 = __p1; \ int16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_u8(__p0, __p1) __extension__ ({ \ uint8x8x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ }) #else #define vst4_u8(__p0, __p1) __extension__ ({ \ uint8x8x4_t __s1 = __p1; \ uint8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_u32(__p0, __p1) __extension__ ({ \ uint32x2x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ }) #else #define vst4_u32(__p0, __p1) __extension__ ({ \ uint32x2x4_t __s1 = __p1; \ uint32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ }) #endif #define vst4_u64(__p0, __p1) __extension__ ({ \ uint64x1x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ }) #ifdef __LITTLE_ENDIAN__ #define vst4_u16(__p0, __p1) __extension__ ({ \ uint16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ }) #else #define vst4_u16(__p0, __p1) __extension__ ({ \ uint16x4x4_t __s1 = __p1; \ uint16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_s8(__p0, __p1) __extension__ ({ \ int8x8x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ }) #else #define vst4_s8(__p0, __p1) __extension__ ({ \ int8x8x4_t __s1 = __p1; \ int8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_f32(__p0, __p1) __extension__ ({ \ float32x2x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ }) #else #define vst4_f32(__p0, __p1) __extension__ ({ \ float32x2x4_t __s1 = __p1; \ float32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_s32(__p0, __p1) __extension__ ({ \ int32x2x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ }) #else #define vst4_s32(__p0, __p1) __extension__ ({ \ int32x2x4_t __s1 = __p1; \ int32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ }) #endif #define vst4_s64(__p0, __p1) __extension__ ({ \ int64x1x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ }) #ifdef __LITTLE_ENDIAN__ #define vst4_s16(__p0, __p1) __extension__ ({ \ int16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ }) #else #define vst4_s16(__p0, __p1) __extension__ ({ \ int16x4x4_t 
__s1 = __p1; \ int16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ }) #else #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8x4_t __s1 = __p1; \ poly8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ }) #else #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4x4_t __s1 = __p1; \ poly16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ }) #else #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8x4_t __s1 = __p1; \ poly16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ }) #else #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4x4_t __s1 = __p1; \ uint32x4x4_t __rev1; \ __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ }) #else #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8x4_t __s1 = __p1; \ uint16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ }) #else #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x4x4_t __s1 = __p1; \ float32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ }) #else #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4x4_t __s1 = __p1; \ int32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ }) #else #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8x4_t __s1 = __p1; \ int16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 
2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ }) #else #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8x4_t __s1 = __p1; \ uint8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ }) #else #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2x4_t __s1 = __p1; \ uint32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ }) #else #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4x4_t __s1 = __p1; \ uint16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ }) #else #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8x4_t __s1 = __p1; \ int8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 
1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ }) #else #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ float32x2x4_t __s1 = __p1; \ float32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ }) #else #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2x4_t __s1 = __p1; \ int32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ }) #else #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4x4_t __s1 = __p1; \ int16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t 
__ret; __ret = __p0 - __p1; return __ret; } #else __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 - __p1; 
return __ret; } #else __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 - __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 - __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ 
__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #else __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #else __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #else __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #else __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { 
int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #else __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #else __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = vmovl_u8(__p0) - vmovl_u8(__p1); return __ret; } #else __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = vmovl_u32(__p0) - vmovl_u32(__p1); return __ret; } #else __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = vmovl_u16(__p0) - vmovl_u16(__p1); return __ret; } #else __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = vmovl_s8(__p0) - vmovl_s8(__p1); return __ret; } #else __ai int16x8_t 
vsubl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = vmovl_s32(__p0) - vmovl_s32(__p1); return __ret; } #else __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = vmovl_s16(__p0) - vmovl_s16(__p1); return __ret; } #else __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = __p0 - vmovl_u8(__p1); return __ret; } #else __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = __p0 - vmovl_u32(__p1); return __ret; } #else __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __noswap_vmovl_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = __p0 - vmovl_u16(__p1); return __ret; } #else __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = __p0 - vmovl_s8(__p1); return __ret; } #else __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; 
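/* Usage sketch for the vsubw_* ("subtract wide") intrinsics defined around this point:
 * the first operand is already the wide type and only the second, narrow operand is
 * widened before the subtraction.  Illustrative values only:
 *
 *   uint16x8_t acc = vdupq_n_u16(1000);
 *   uint8x8_t  d   = vdup_n_u8(255);
 *   uint16x8_t r   = vsubw_u8(acc, d);   // each lane == 745
 */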
} #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = __p0 - vmovl_s32(__p1); return __ret; } #else __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __noswap_vmovl_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = __p0 - vmovl_s16(__p1); return __ret; } #else __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x8x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) 
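/* Usage sketch for the vtbl1_* intrinsics defined around this point: each result byte
 * is looked up in a single 64-bit (8-entry) table, and an index outside 0-7 produces 0
 * in that lane.  Illustrative values only; assumes a little-endian target:
 *
 *   uint8x8_t table = vcreate_u8(0x0706050403020100ULL);   // table[i] == i
 *   uint8x8_t idx   = vdup_n_u8(3);
 *   uint8x8_t r     = vtbl1_u8(table, idx);   // every lane == 3; an index >= 8 would give 0
 */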
__builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x8x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], 
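/* Usage sketch for the vtbl2_* and vtbl3_* intrinsics defined around this point: the
 * x2 / x3 structure supplies a 16- or 24-entry byte table, and indices past the end of
 * the table give 0.  Illustrative values only; assumes a little-endian target:
 *
 *   uint8x8x2_t t;
 *   t.val[0] = vcreate_u8(0x0706050403020100ULL);   // entries 0..7
 *   t.val[1] = vcreate_u8(0x0F0E0D0C0B0A0908ULL);   // entries 8..15
 *   uint8x8_t r = vtbl2_u8(t, vdup_n_u8(12));       // every lane == 12
 */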
(int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x8x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t 
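/* Usage sketch for the vtbl4_* intrinsics defined around this point: the x4 structure
 * forms a 32-entry byte table and indices >= 32 produce 0.  Illustrative values only:
 *
 *   uint8x8x4_t t;
 *   t.val[0] = vdup_n_u8(0); t.val[1] = vdup_n_u8(1);
 *   t.val[2] = vdup_n_u8(2); t.val[3] = vdup_n_u8(3);
 *   uint8x8_t r = vtbl4_u8(t, vdup_n_u8(17));   // index 17 falls in t.val[2], so every lane == 2
 */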
vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 
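/* Usage sketch for the vtbx1_* ("table lookup extension") intrinsics defined around
 * this point: unlike vtbl1, a lane whose index is out of range keeps the corresponding
 * lane of the first operand instead of becoming 0.  Illustrative values only; assumes a
 * little-endian target:
 *
 *   uint8x8_t keep  = vdup_n_u8(0xAA);
 *   uint8x8_t table = vcreate_u8(0x0706050403020100ULL);
 *   uint8x8_t idx   = vcreate_u8(0xFF060504030201FFULL);   // lanes 0 and 7 are out of range
 *   uint8x8_t r     = vtbx1_u8(keep, table, idx);   // those lanes stay 0xAA, the rest read the table
 */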
4, 3, 2, 1, 0); poly8x8x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef 
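/* Usage sketch for the vtbx2_* intrinsics defined around this point: the x2 structure
 * is a 16-entry table, and lanes whose index is >= 16 keep the first operand.
 * Illustrative values only:
 *
 *   uint8x8x2_t t;
 *   t.val[0] = vdup_n_u8(0x11);
 *   t.val[1] = vdup_n_u8(0x22);
 *   uint8x8_t r = vtbx2_u8(vdup_n_u8(0x99), t, vdup_n_u8(20));   // 20 >= 16, so every lane keeps 0x99
 */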
__LITTLE_ENDIAN__ __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = 
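/* Usage sketch for the vtbx3_* and vtbx4_* intrinsics defined around this point: they
 * behave like vtbx2 with 24- and 32-entry tables, so only indices >= 24 (respectively
 * >= 32) leave the destination lane unchanged.  Illustrative values only; assumes a
 * little-endian target:
 *
 *   uint8x8x4_t t;
 *   t.val[0] = vdup_n_u8(0); t.val[1] = vdup_n_u8(1);
 *   t.val[2] = vdup_n_u8(2); t.val[3] = vdup_n_u8(3);
 *   uint8x8_t idx = vcreate_u8(0x4000000000000000ULL);   // lane 7 == 0x40, the rest == 0
 *   uint8x8_t r   = vtbx4_u8(vdup_n_u8(0x55), t, idx);   // lanes 0-6 read entry 0, lane 7 keeps 0x55
 */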
(uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, 
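/* Usage sketch for the vtrn_* ("transpose") intrinsics defined around this point: the
 * two inputs are treated as rows of 2x2 element blocks; val[0] collects the even-indexed
 * pairs (a0,b0,a2,b2,...) and val[1] the odd-indexed pairs (a1,b1,a3,b3,...).
 * Illustrative values only; assumes a little-endian target:
 *
 *   uint16x4_t a = vcreate_u16(0x0003000200010000ULL);   // {0,1,2,3}
 *   uint16x4_t b = vcreate_u16(0x0007000600050004ULL);   // {4,5,6,7}
 *   uint16x4x2_t t = vtrn_u16(a, b);
 *   // t.val[0] == {0,4,2,6}, t.val[1] == {1,5,3,7}
 */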
(int8x8_t)__rev1, 5); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); return 
__ret; } #else __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef 
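/* Usage sketch for the vtrnq_* intrinsics defined around this point: the q-forms apply
 * the same per-pair transpose to 128-bit vectors.  Illustrative values only:
 *
 *   float32x4_t a = vdupq_n_f32(1.0f);
 *   float32x4_t b = vdupq_n_f32(2.0f);
 *   float32x4x2_t t = vtrnq_f32(a, b);
 *   // t.val[0] == {1,2,1,2}, t.val[1] == {1,2,1,2}
 */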
__LITTLE_ENDIAN__ __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t 
__ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { uint8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { uint16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { uint8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { uint16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
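/* Usage sketch for the vtst_* and vtstq_* ("test bits") intrinsics defined around this
 * point: a result lane is all ones when the bitwise AND of the corresponding input
 * lanes is non-zero, and all zeros otherwise; the result is always an unsigned vector.
 * Illustrative values only:
 *
 *   uint32x4_t flags = vdupq_n_u32(0x4);
 *   uint32x4_t v     = vdupq_n_u32(0x6);
 *   uint32x4_t m     = vtstq_u32(v, flags);   // (0x6 & 0x4) != 0, so every lane == 0xFFFFFFFF
 */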
7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = 
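/* Usage sketch for the 64-bit vtst_* forms defined around this point, producing a
 * per-lane mask that can feed vbsl_u8 or further mask arithmetic.  Illustrative values
 * only; assumes a little-endian target:
 *
 *   uint8x8_t x    = vcreate_u8(0x80FF7F0080FF7F00ULL);
 *   uint8x8_t mask = vdup_n_u8(0x80);
 *   uint8x8_t m    = vtst_u8(x, mask);   // 0xFF where bit 7 of x is set, 0x00 elsewhere
 */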
__builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); 
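/* Usage sketch for the vuzp_* ("unzip" / de-interleave) intrinsics defined around this
 * point: val[0] receives the even-positioned elements of the concatenation {a, b} and
 * val[1] the odd-positioned ones.  Illustrative values only; assumes a little-endian
 * target:
 *
 *   uint16x4_t a = vcreate_u16(0x0003000200010000ULL);   // {0,1,2,3}
 *   uint16x4_t b = vcreate_u16(0x0007000600050004ULL);   // {4,5,6,7}
 *   uint16x4x2_t u = vuzp_u16(a, b);
 *   // u.val[0] == {0,2,4,6}, u.val[1] == {1,3,5,7}
 */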
__builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; 
__builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, 
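/* Usage sketch for the vuzpq_* intrinsics defined around this point: the q-forms
 * de-interleave 128-bit vectors, e.g. splitting (value, scale) pairs into two separate
 * streams.  Illustrative values only, written with the compiler's vector initializer
 * extension:
 *
 *   float32x4_t lo = {1.0f, 10.0f, 2.0f, 20.0f};
 *   float32x4_t hi = {3.0f, 30.0f, 4.0f, 40.0f};
 *   float32x4x2_t s = vuzpq_f32(lo, hi);
 *   // s.val[0] == {1,2,3,4} (values), s.val[1] == {10,20,30,40} (scales)
 */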
(int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; poly8x8_t __rev0; __rev0 = 
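/*
 * Illustrative note (a usage sketch, not part of the generated definitions):
 * the vuzp/vuzpq family above de-interleaves two vectors. val[0] of the
 * result collects the even-indexed lanes of both operands and val[1] the
 * odd-indexed lanes. On big-endian targets the wrappers first reverse the
 * lanes with __builtin_shufflevector, invoke the builtin in the lane order
 * it expects, then reverse both result vectors back. The locals below
 * (interleaved, lo, hi, parts) are hypothetical.
 *
 *   uint8x8_t lo = vld1_u8(interleaved);      // first 8 interleaved bytes
 *   uint8x8_t hi = vld1_u8(interleaved + 8);  // next 8 bytes
 *   uint8x8x2_t parts = vuzp_u8(lo, hi);      // val[0]: even lanes, val[1]: odd lanes
 */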
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 
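/*
 * Illustrative note (a usage sketch only): vzip/vzipq are the interleaving
 * counterpart of vuzp/vuzpq and zip two vectors lane by lane; val[0]
 * interleaves the low halves of the operands ({a0, b0, a1, b1, ...}) and
 * val[1] the high halves. The locals red_plane, green_plane, reds, greens
 * and rg are hypothetical.
 *
 *   uint8x8_t reds   = vld1_u8(red_plane);
 *   uint8x8_t greens = vld1_u8(green_plane);
 *   uint8x8x2_t rg   = vzip_u8(reds, greens);  // rg.val[0] = {r0, g0, r1, g1, ...}
 */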
6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #if !defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \ float16x8_t __ret_122; \ float16x4_t __s0_122 = __p0_122; \ __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \ __ret_122; \ }) #else #define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \ float16x8_t __ret_123; \ float16x4_t __s0_123 = __p0_123; \ float16x4_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \ __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \ __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_123; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \ float16x4_t __ret_124; \ float16x4_t __s0_124 = __p0_124; \ __ret_124 = splat_lane_f16(__s0_124, __p1_124); \ __ret_124; \ }) #else #define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \ float16x4_t __ret_125; \ float16x4_t __s0_125 = __p0_125; \ float16x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \ __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ __ret_125; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_n_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ 
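/*
 * Illustrative note (a usage sketch only): the float16 vdup/vmov macros in
 * this !__aarch64__ block broadcast either a scalar float16_t (vdup_n_f16,
 * vmov_n_f16) or a selected lane of an existing vector (vdup_lane_f16,
 * vdupq_lane_f16) into every lane of the result; the big-endian variants
 * build the vector in reversed lane order and shuffle it back. The locals
 * h, v4 and v8 are hypothetical.
 *
 *   float16_t h = (float16_t)1.5f;
 *   float16x4_t v4 = vdup_n_f16(h);          // {1.5, 1.5, 1.5, 1.5}
 *   float16x8_t v8 = vdupq_lane_f16(v4, 2);  // lane 2 of v4 broadcast to 8 lanes
 */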
__ret; \ }) #else #define vdupq_n_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_n_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret; \ }) #else #define vdup_n_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmovq_n_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret; \ }) #else #define vmovq_n_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmov_n_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret; \ }) #else #define vmov_n_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ float16_t __s0 = __p0; \ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \ int32x4_t __ret_126; \ int32x4_t __s0_126 = __p0_126; \ int32x2_t __s1_126 = __p1_126; \ __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \ __ret_126; \ }) #else #define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \ int32x4_t __ret_127; \ int32x4_t __s0_127 = __p0_127; \ int32x2_t __s1_127 = __p1_127; \ int32x4_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \ int32x2_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \ __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \ __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \ __ret_127; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \ int16x8_t __ret_128; \ int16x8_t __s0_128 = __p0_128; \ int16x4_t __s1_128 = __p1_128; \ __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \ __ret_128; \ }) #else #define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \ int16x8_t __ret_129; \ int16x8_t __s0_129 = __p0_129; \ int16x4_t __s1_129 = __p1_129; \ int16x8_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \ __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_129; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \ int32x2_t __ret_130; \ int32x2_t __s0_130 = __p0_130; \ int32x2_t __s1_130 = __p1_130; \ __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \ __ret_130; \ }) #else #define vqdmulh_lane_s32(__p0_131, __p1_131, 
__p2_131) __extension__ ({ \ int32x2_t __ret_131; \ int32x2_t __s0_131 = __p0_131; \ int32x2_t __s1_131 = __p1_131; \ int32x2_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \ int32x2_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \ __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \ __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \ __ret_131; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \ int16x4_t __ret_132; \ int16x4_t __s0_132 = __p0_132; \ int16x4_t __s1_132 = __p1_132; \ __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \ __ret_132; \ }) #else #define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \ int16x4_t __ret_133; \ int16x4_t __s0_133 = __p0_133; \ int16x4_t __s1_133 = __p1_133; \ int16x4_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \ int16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \ __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \ __ret_133; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \ int32x4_t __ret_134; \ int32x4_t __s0_134 = __p0_134; \ int32x2_t __s1_134 = __p1_134; \ __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \ __ret_134; \ }) #else #define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \ int32x4_t __ret_135; \ int32x4_t __s0_135 = __p0_135; \ int32x2_t __s1_135 = __p1_135; \ int32x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \ int32x2_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \ __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \ __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \ __ret_135; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \ int16x8_t __ret_136; \ int16x8_t __s0_136 = __p0_136; \ int16x4_t __s1_136 = __p1_136; \ __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \ __ret_136; \ }) #else #define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \ int16x8_t __ret_137; \ int16x8_t __s0_137 = __p0_137; \ int16x4_t __s1_137 = __p1_137; \ int16x8_t __rev0_137; __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev1_137; __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \ __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \ __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_137; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \ int32x2_t __ret_138; \ int32x2_t __s0_138 = __p0_138; \ int32x2_t __s1_138 = __p1_138; \ __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \ __ret_138; \ }) #else #define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \ int32x2_t __ret_139; \ int32x2_t __s0_139 = __p0_139; \ int32x2_t __s1_139 = __p1_139; \ int32x2_t __rev0_139; __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \ int32x2_t __rev1_139; 
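/*
 * Illustrative note (a usage sketch only): vqdmulh/vqrdmulh return the high
 * half of a saturating doubling multiply (a Q15/Q31 fixed-point multiply),
 * and the _lane forms first splat one lane of the second operand via
 * splat(q)_lane_* so a whole vector is scaled by a single coefficient;
 * vqrdmulh additionally rounds before truncating. The locals src, kernel,
 * samples, coeffs and scaled are hypothetical.
 *
 *   int16x8_t samples = vld1q_s16(src);     // Q15 data
 *   int16x4_t coeffs  = vld1_s16(kernel);   // Q15 coefficients
 *   int16x8_t scaled  = vqrdmulhq_lane_s16(samples, coeffs, 0);
 *   // each lane: saturate((2 * samples[i] * coeffs[0] + 0x8000) >> 16)
 */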
__rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \ __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \ __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \ __ret_139; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \ int16x4_t __ret_140; \ int16x4_t __s0_140 = __p0_140; \ int16x4_t __s1_140 = __p1_140; \ __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \ __ret_140; \ }) #else #define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \ int16x4_t __ret_141; \ int16x4_t __s0_141 = __p0_141; \ int16x4_t __s1_141 = __p1_141; \ int16x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \ int16x4_t __rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \ __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \ __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \ __ret_141; \ }) #endif __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) 
{ poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { uint8x16_t 
__ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { uint16x8_t 
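/*
 * Illustrative note (a usage sketch only): the vreinterpretq_* functions are
 * pure bit reinterpretations between 128-bit vector types; the 128 bits are
 * left unchanged, only the static type of the value changes, and the cast
 * typically compiles to no code. The locals v, raw and bits are hypothetical.
 *
 *   float32x4_t v   = vdupq_n_f32(1.0f);
 *   uint8x16_t raw  = vreinterpretq_u8_f32(v);   // same 128 bits viewed as bytes
 *   uint32x4_t bits = vreinterpretq_u32_f32(v);  // the IEEE-754 bit patterns
 */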
__ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { float16x8_t 
__ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { int64x2_t __ret; __ret = 
(int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { 
uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai 
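/*
 * Illustrative note (a usage sketch only): the vreinterpret_* functions are
 * the 64-bit (D-register) counterparts of vreinterpretq_*; they reinterpret
 * the same 64 bits as a different element type without moving any lanes.
 * The locals a, word and bytes are hypothetical.
 *
 *   int16x4_t a     = vdup_n_s16(-1);
 *   uint64x1_t word = vreinterpret_u64_s16(a);  // all 64 bits as a single lane
 *   uint8x8_t bytes = vreinterpret_u8_s16(a);   // the same bits as 8 bytes
 */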
int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t 
vreinterpret_s32_u8(uint8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } #endif #if (__ARM_FP & 
2) #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); return __ret; } #else __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); return __ret; } #else __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { float32x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ __ret; \ }) #else #define vld1q_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ __ret; \ }) #else #define vld1_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ __ret; \ }) #else #define vld1q_dup_f16(__p0) __extension__ ({ \ float16x8_t __ret; \ __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ __ret; \ }) #else #define vld1_dup_f16(__p0) __extension__ ({ \ float16x4_t __ret; \ __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s1 = __p1; \ __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ __ret; \ }) #else #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s1 = __p1; \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __ret; \ 
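/*
 * Illustrative note (a usage sketch only): this (__ARM_FP & 2) block provides
 * half-precision storage support. vcvt_f16_f32 narrows four float32 lanes to
 * four float16 lanes and vcvt_f32_f16 widens them back, while the vld1*_f16
 * macros load float16 data directly from memory (with the usual big-endian
 * lane reversal applied to the result). The locals src, wide, packed and
 * back are hypothetical.
 *
 *   float32x4_t wide   = vld1q_f32(src);
 *   float16x4_t packed = vcvt_f16_f32(wide);    // round/narrow to 4 x __fp16
 *   float32x4_t back   = vcvt_f32_f16(packed);  // widen again for arithmetic
 */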
float16x4_t __s1 = __p1; \ __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ __ret; \ }) #else #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s1 = __p1; \ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f16_x2(__p0) __extension__ ({ \ float16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld1q_f16_x2(__p0) __extension__ ({ \ float16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_f16_x2(__p0) __extension__ ({ \ float16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld1_f16_x2(__p0) __extension__ ({ \ float16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f16_x3(__p0) __extension__ ({ \ float16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld1q_f16_x3(__p0) __extension__ ({ \ float16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_f16_x3(__p0) __extension__ ({ \ float16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld1_f16_x3(__p0) __extension__ ({ \ float16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f16_x4(__p0) __extension__ ({ \ float16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld1q_f16_x4(__p0) __extension__ ({ \ float16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_f16_x4(__p0) __extension__ ({ \ float16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld1_f16_x4(__p0) __extension__ ({ \ float16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); 
\ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_f16(__p0) __extension__ ({ \ float16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld2q_f16(__p0) __extension__ ({ \ float16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_f16(__p0) __extension__ ({ \ float16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld2_f16(__p0) __extension__ ({ \ float16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_f16(__p0) __extension__ ({ \ float16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld2q_dup_f16(__p0) __extension__ ({ \ float16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_f16(__p0) __extension__ ({ \ float16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld2_dup_f16(__p0) __extension__ ({ \ float16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x2_t __ret; \ float16x8x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \ __ret; \ }) #else #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x2_t __ret; \ float16x8x2_t __s1 = __p1; \ float16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x2_t __ret; \ float16x4x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \ __ret; \ }) #else #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x2_t __ret; \ float16x4x2_t __s1 = __p1; \ float16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_f16(__p0) __extension__ ({ \ float16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld3q_f16(__p0) __extension__ ({ \ float16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_f16(__p0) __extension__ ({ \ float16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld3_f16(__p0) __extension__ ({ \ float16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_f16(__p0) __extension__ ({ \ float16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld3q_dup_f16(__p0) __extension__ ({ \ float16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_f16(__p0) __extension__ ({ \ float16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld3_dup_f16(__p0) __extension__ ({ \ float16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x3_t __ret; \ float16x8x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \ __ret; \ }) #else #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x3_t __ret; \ float16x8x3_t __s1 = __p1; \ float16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x3_t __ret; \ float16x4x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \ __ret; \ }) #else #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x3_t __ret; \ float16x4x3_t __s1 = __p1; \ float16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_f16(__p0) __extension__ ({ \ float16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld4q_f16(__p0) __extension__ ({ \ float16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_f16(__p0) __extension__ ({ \ float16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld4_f16(__p0) __extension__ ({ \ float16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_f16(__p0) __extension__ ({ \ float16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ __ret; \ }) #else #define vld4q_dup_f16(__p0) __extension__ ({ \ float16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_f16(__p0) __extension__ ({ \ float16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ __ret; \ }) #else #define vld4_dup_f16(__p0) __extension__ ({ \ float16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x4_t __ret; \ float16x8x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \ __ret; \ }) #else #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x4_t __ret; \ float16x8x4_t __s1 = __p1; \ float16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x4_t __ret; \ float16x4x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \ __ret; \ }) #else #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x4_t __ret; \ float16x4x4_t __s1 = __p1; \ float16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f16(__p0, __p1) __extension__ ({ \ float16x8_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \ }) #else #define vst1q_f16(__p0, __p1) __extension__ ({ \ float16x8_t __s1 = __p1; \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_f16(__p0, __p1) __extension__ ({ \ float16x4_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \ }) #else #define vst1_f16(__p0, __p1) __extension__ ({ \ float16x4_t __s1 = __p1; \ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_v(__p0, 
(int8x8_t)__rev1, 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ }) #else #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __s1 = __p1; \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ }) #else #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __s1 = __p1; \ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ float16x8x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \ }) #else #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ float16x8x2_t __s1 = __p1; \ float16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_f16_x2(__p0, __p1) __extension__ ({ \ float16x4x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \ }) #else #define vst1_f16_x2(__p0, __p1) __extension__ ({ \ float16x4x2_t __s1 = __p1; \ float16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ float16x8x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \ }) #else #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ float16x8x3_t __s1 = __p1; \ float16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_f16_x3(__p0, __p1) __extension__ ({ \ float16x4x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \ }) #else #define vst1_f16_x3(__p0, __p1) __extension__ ({ \ float16x4x3_t __s1 = __p1; \ float16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ 
float16x8x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \ }) #else #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ float16x8x4_t __s1 = __p1; \ float16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_f16_x4(__p0, __p1) __extension__ ({ \ float16x4x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \ }) #else #define vst1_f16_x4(__p0, __p1) __extension__ ({ \ float16x4x4_t __s1 = __p1; \ float16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_f16(__p0, __p1) __extension__ ({ \ float16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \ }) #else #define vst2q_f16(__p0, __p1) __extension__ ({ \ float16x8x2_t __s1 = __p1; \ float16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_f16(__p0, __p1) __extension__ ({ \ float16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \ }) #else #define vst2_f16(__p0, __p1) __extension__ ({ \ float16x4x2_t __s1 = __p1; \ float16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \ }) #else #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x2_t __s1 = __p1; \ float16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], __p2, 8); \ }) #else #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x2_t __s1 = __p1; \ float16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_f16(__p0, __p1) __extension__ ({ \ float16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \ }) #else #define vst3q_f16(__p0, __p1) __extension__ ({ \ float16x8x3_t __s1 = __p1; \ float16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_f16(__p0, __p1) __extension__ ({ \ float16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \ }) #else #define vst3_f16(__p0, __p1) __extension__ ({ \ float16x4x3_t __s1 = __p1; \ float16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \ }) #else #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x3_t __s1 = __p1; \ float16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \ }) #else #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x3_t __s1 = __p1; \ float16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_f16(__p0, __p1) __extension__ ({ \ float16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \ }) 
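/* Usage sketch (illustrative only, not part of the generated header): the
 * vld*_f16/vst*_f16 macros above move __fp16 data between memory and vector
 * registers, while vcvt_f16_f32/vcvt_f32_f16 convert between half and single
 * precision, so half-precision storage is typically paired with f32
 * arithmetic.  The pointers `src` and `dst` below are hypothetical
 * float16_t pointers.
 *
 *   float16x4_t h = vld1_f16(src);        // load 4 x f16 from memory
 *   float32x4_t w = vcvt_f32_f16(h);      // widen to 4 x f32 for arithmetic
 *   w = vmulq_n_f32(w, 2.0f);             // ...compute in single precision...
 *   vst1_f16(dst, vcvt_f16_f32(w));       // narrow back to f16 and store
 *
 * The vld2/vld3/vld4 and vst2/vst3/vst4 forms additionally de-interleave and
 * re-interleave 2-, 3- or 4-channel data, as they do for the other element
 * types.
 */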
#else #define vst4q_f16(__p0, __p1) __extension__ ({ \ float16x8x4_t __s1 = __p1; \ float16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_f16(__p0, __p1) __extension__ ({ \ float16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \ }) #else #define vst4_f16(__p0, __p1) __extension__ ({ \ float16x4x4_t __s1 = __p1; \ float16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \ }) #else #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8x4_t __s1 = __p1; \ float16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \ }) #else #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4x4_t __s1 = __p1; \ float16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \ }) #endif #endif #if __ARM_ARCH >= 8 #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); 
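/* Usage sketch (illustrative only, not part of the generated header): the
 * vcvt{a,n,m,p}* conversions defined in this block round to integer with an
 * explicit rounding mode, unlike the plain vcvt* conversions, which truncate
 * toward zero.  Suffix a = to nearest with ties away from zero, n = to
 * nearest with ties to even, m = toward minus infinity, p = toward plus
 * infinity.
 *
 *   float32x4_t x = vdupq_n_f32(2.5f);
 *   vcvtaq_s32_f32(x);   // 3 in every lane (ties away from zero)
 *   vcvtnq_s32_f32(x);   // 2 in every lane (ties to even)
 *   vcvtmq_s32_f32(x);   // 2 in every lane (round down)
 *   vcvtpq_s32_f32(x);   // 3 in every lane (round up)
 */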
__ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vcvtnq_s32_f32(float32x4_t 
__p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34); return __ret; } #else __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2); return __ret; } #else __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_AES) #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrndq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrndq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrnd_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9); return 
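/* Usage sketch (illustrative only, not part of the generated header): the
 * vrnd* family rounds each lane to an integral floating-point value; the
 * suffix selects the rounding mode.  vrnd rounds toward zero, vrndn to
 * nearest with ties to even, vrnda to nearest with ties away from zero,
 * vrndm toward minus infinity, vrndp toward plus infinity, and vrndi/vrndx
 * use the current rounding mode (vrndx additionally signals Inexact).
 *
 *   float32x2_t x = vdup_n_f32(2.5f);
 *   vrnd_f32(x);    // {2.0, 2.0}  toward zero
 *   vrndn_f32(x);   // {2.0, 2.0}  nearest, ties to even
 *   vrnda_f32(x);   // {3.0, 3.0}  nearest, ties away from zero
 *   vrndm_f32(x);   // {2.0, 2.0}  toward minus infinity
 *   vrndp_f32(x);   // {3.0, 3.0}  toward plus infinity
 */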
__ret; } #else __ai float32x2_t vrnd_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrndaq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrndaq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrnda_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrnda_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrndiq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrndiq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrndi_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrndi_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrndmq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrndmq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrndm_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrndm_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrndnq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrndnq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 
2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrndn_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrndn_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float32_t vrndns_f32(float32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrndns_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrndpq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrndpq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrndp_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrndp_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrndxq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrndxq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrndx_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrndx_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrndq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrndq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrnd_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrnd_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 
1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrndaq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrndaq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrnda_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrnda_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrndmq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrndmq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrndm_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrndm_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrndnq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrndnq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrndn_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrndn_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrndpq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrndpq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrndp_f16(float16x4_t __p0) { float16x4_t __ret; __ret = 
(float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrndp_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrndxq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrndxq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrndx_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrndx_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vminnm_f32(float32x2_t __p0, 
float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA2) #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2); return __ret; } #else __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 
3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai uint32_t vsha1h_u32(uint32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2); return __ret; } #else __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2); return __ret; } #else __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) 
__builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else __ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) 
__builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else __ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else __ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else __ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); return __ret; } #else __ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else __ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else __ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t 
veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else __ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else __ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else __ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); return __ret; } #else __ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else __ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else #define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else __ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else 
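/* The veor3q_*, vbcaxq_*, vrax1q_u64 and vxarq_u64 intrinsics defined above
 * (guarded by __ARM_FEATURE_SHA3) wrap the EOR3, BCAX, RAX1 and XAR
 * instructions: three-operand XOR, bit-clear-and-XOR, and rotate-and-XOR
 * operations that appear in Keccak/SHA-3 style permutations. Illustrative
 * sketch only; the helper name below is hypothetical and not part of this
 * header:
 *
 *   // Three-way XOR: a ^ b ^ c in a single EOR3 instruction.
 *   static inline uint64x2_t xor3_u64(uint64x2_t a, uint64x2_t b,
 *                                     uint64x2_t c) {
 *     return veor3q_u64(a, b, c);
 *   }
 *
 * vxarq_u64 is a macro rather than a function because its rotate amount must
 * be a compile-time constant.
 */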
__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else __ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ 
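/* The vsha512hq_u64, vsha512h2q_u64, vsha512su0q_u64 and vsha512su1q_u64
 * intrinsics in the preceding block (__ARM_FEATURE_SHA512) map directly to
 * the SHA512H, SHA512H2, SHA512SU0 and SHA512SU1 instructions, and the vsm3*
 * intrinsics in this block (__ARM_FEATURE_SM3) map to SM3PARTW1, SM3PARTW2,
 * SM3SS1 and SM3TT1A/1B/2A/2B for the SM3 hash. The SHA-512 forms operate on
 * uint64x2_t lanes, the SM3 forms on uint32x4_t lanes. Illustrative sketch
 * only; the variable names are hypothetical and the schedule indices carry
 * no particular meaning:
 *
 *   // One SHA-512 message-schedule update composed from two intrinsics.
 *   uint64x2_t s = vsha512su1q_u64(vsha512su0q_u64(w0, w1), w7, x);
 *
 * The vsm3tt* forms are macros because their last argument is a lane index
 * that must be a compile-time constant.
 */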
__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __s2 = __p2; \ __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ __ret; \ }) #else #define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __s2 = __p2; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __s2 = __p2; \ __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ __ret; \ }) #else #define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __s2 = __p2; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __s2 = __p2; \ __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ __ret; \ }) #else #define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __s2 = __p2; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsm3tt2bq_u32(__p0, __p1, 
__p2, __p3) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __s2 = __p2; \ __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ __ret; \ }) #else #define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ uint32x4_t __s2 = __p2; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vcvtmq_s64_f64(float64x2_t 
__p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t 
vcvtp_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); return __ret; } __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { 
poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { poly128_t __ret; __ret = 
(poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { poly16x8_t __ret; __ret = 
(poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { uint32x4_t __ret; __ret = 
(uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { uint16x8_t __ret; 
__ret = (uint16x8_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { float32x4_t __ret; __ret 
= (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } 
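/* The vreinterpret_* and vreinterpretq_* functions above and below perform
 * pure bit-pattern casts: source and destination vectors have the same total
 * width (64 or 128 bits), no lane values are converted, and the calls
 * typically compile to no instructions at all. Illustrative sketch only; the
 * helper name below is hypothetical and not part of this header:
 *
 *   // Clear the sign bit of every float lane by operating on the raw bits.
 *   static inline float32x4_t abs_via_bits(float32x4_t v) {
 *     uint32x4_t bits = vreinterpretq_u32_f32(v);
 *     bits = vandq_u32(bits, vdupq_n_u32(0x7fffffffu));
 *     return vreinterpretq_f32_u32(bits);
 *   }
 */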
__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { int16x8_t __ret; __ret = 
(int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { 
uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = 
(uint16x4_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t 
vreinterpret_f32_u32(uint32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai 
int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } 
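/*
 * Illustrative usage sketch (not part of the generated intrinsics; the helper
 * name below is hypothetical): the non-q vreinterpret_* forms above perform
 * the same bit-level reinterpretation on 64-bit D-register vectors.
 *
 *   static inline int64x1_t halfwords_as_s64(void) {
 *     int16x4_t h = vdup_n_s16(1);      // four 0x0001 halfword lanes
 *     return vreinterpret_s64_s16(h);   // same 64 bits viewed as one s64 lane
 *   }
 */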
#endif #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrndq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrndq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrnd_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrndaq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrndaq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrnda_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrndiq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrndiq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrndi_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrndmq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrndmq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrndm_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrndnq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrndnq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrndn_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrndpq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrndpq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); 
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrndp_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrndxq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrndxq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrndx_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); return __ret; } #endif #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT) #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrnd32xq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrnd32xq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrnd32x_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrnd32x_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrnd32zq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrnd32zq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrnd32z_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrnd32z_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrnd64xq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrnd64xq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrnd64x_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrnd64x_f32(float32x2_t __p0) { 
float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vrnd64zq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vrnd64zq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vrnd64z_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vrnd64z_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #endif #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #endif #if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__) __ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai 
poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t 
vreinterpretq_bf16_f32(float32x4_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } #endif #if defined(__ARM_FEATURE_BF16) && defined(__aarch64__) __ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } __ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } __ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } __ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } __ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } __ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } __ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } __ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } __ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } __ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } __ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t 
__p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } __ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } __ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } __ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } __ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } __ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } __ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } __ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } __ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } __ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } __ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } __ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } __ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } __ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } __ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } __ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } __ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } __ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } __ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t 
__p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } __ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t)(__p0); return __ret; } #endif #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ #define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16x4_t __s0 = __p0; \ __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #else #define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16x4_t __s0 = __p0; \ bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16x4_t __s0 = __p0; \ __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16x4_t __s0 = __p0; \ __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #else #define splat_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16x4_t __s0 = __p0; \ bfloat16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16x4_t __s0 = __p0; \ __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16x8_t __s0 = __p0; \ __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #else #define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16x8_t __s0 = __p0; \ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16x8_t __s0 = __p0; \ __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16x8_t __s0 = __p0; \ __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #else #define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16x8_t __s0 = __p0; \ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16x8_t __s0 = __p0; \ __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); bfloat16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \ float32x4_t __ret_142; \ float32x4_t __s0_142 = __p0_142; \ bfloat16x8_t __s1_142 = __p1_142; \ bfloat16x4_t __s2_142 = __p2_142; \ bfloat16x4_t __reint_142 = __s2_142; \ float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \ __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \ __ret_142; \ }) #else #define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \ float32x4_t __ret_143; \ float32x4_t __s0_143 = __p0_143; \ bfloat16x8_t __s1_143 = __p1_143; \ bfloat16x4_t __s2_143 = __p2_143; \ float32x4_t __rev0_143; __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \ bfloat16x8_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x4_t __rev2_143; __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \ bfloat16x4_t __reint_143 = __rev2_143; \ float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \ __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \ __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \ __ret_143; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \ float32x2_t __ret_144; \ float32x2_t __s0_144 = __p0_144; \ bfloat16x4_t __s1_144 = __p1_144; \ bfloat16x4_t __s2_144 = __p2_144; \ bfloat16x4_t __reint_144 = __s2_144; \ float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \ __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \ __ret_144; \ }) #else #define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \ float32x2_t __ret_145; \ float32x2_t __s0_145 = __p0_145; \ bfloat16x4_t __s1_145 = __p1_145; \ bfloat16x4_t __s2_145 = __p2_145; \ float32x2_t __rev0_145; __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \ bfloat16x4_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \ bfloat16x4_t __rev2_145; __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \ bfloat16x4_t __reint_145 = __rev2_145; \ float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \ __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \ __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \ __ret_145; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \ float32x4_t __ret_146; \ float32x4_t __s0_146 = __p0_146; \ bfloat16x8_t __s1_146 = __p1_146; \ bfloat16x8_t __s2_146 = __p2_146; \ bfloat16x8_t __reint_146 = __s2_146; \ float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \ __ret_146 = vbfdotq_f32(__s0_146, __s1_146, 
*(bfloat16x8_t *) &__reint1_146); \ __ret_146; \ }) #else #define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \ float32x4_t __ret_147; \ float32x4_t __s0_147 = __p0_147; \ bfloat16x8_t __s1_147 = __p1_147; \ bfloat16x8_t __s2_147 = __p2_147; \ float32x4_t __rev0_147; __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \ bfloat16x8_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x8_t __rev2_147; __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x8_t __reint_147 = __rev2_147; \ float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \ __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \ __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \ __ret_147; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \ float32x2_t __ret_148; \ float32x2_t __s0_148 = __p0_148; \ bfloat16x4_t __s1_148 = __p1_148; \ bfloat16x8_t __s2_148 = __p2_148; \ bfloat16x8_t __reint_148 = __s2_148; \ float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \ __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \ __ret_148; \ }) #else #define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \ float32x2_t __ret_149; \ float32x2_t __s0_149 = __p0_149; \ bfloat16x4_t __s1_149 = __p1_149; \ bfloat16x8_t __s2_149 = __p2_149; \ float32x2_t __rev0_149; __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \ bfloat16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \ bfloat16x8_t __rev2_149; __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x8_t __reint_149 = __rev2_149; \ float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \ __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \ __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \ __ret_149; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t 
vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { bfloat16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else __ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { bfloat16x8_t __ret; bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { bfloat16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #endif #define vcreate_bf16(__p0) __extension__ ({ \ bfloat16x4_t __ret; \ uint64_t __promote = __p0; \ __ret = (bfloat16x4_t)(__promote); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) { float32x4_t __ret_150; bfloat16x4_t __reint_150 = __p0_150; int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16); __ret_150 = *(float32x4_t *) &__reint1_150; return __ret_150; } #else __ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) { float32x4_t __ret_151; bfloat16x4_t __rev0_151; __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0); bfloat16x4_t __reint_151 = __rev0_151; int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16); __ret_151 = *(float32x4_t *) &__reint1_151; __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0); return __ret_151; } __ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) { float32x4_t __ret_152; bfloat16x4_t __reint_152 = __p0_152; int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16); __ret_152 = *(float32x4_t *) &__reint1_152; return __ret_152; } #endif __ai 
float32_t vcvtah_f32_bf16(bfloat16_t __p0) { float32_t __ret; bfloat16_t __reint = __p0; int32_t __reint1 = *(int32_t *) &__reint << 16; __ret = *(float32_t *) &__reint1; return __ret; } __ai bfloat16_t vcvth_bf16_f32(float32_t __p0) { bfloat16_t __ret; __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ #define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x4_t __s0 = __p0; \ __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ __ret; \ }) #else #define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x4_t __s0 = __p0; \ bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \ bfloat16x8_t __ret_153; \ bfloat16x4_t __s0_153 = __p0_153; \ __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \ __ret_153; \ }) #else #define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \ bfloat16x8_t __ret_154; \ bfloat16x4_t __s0_154 = __p0_154; \ bfloat16x4_t __rev0_154; __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \ __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \ __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_154; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \ bfloat16x4_t __ret_155; \ bfloat16x4_t __s0_155 = __p0_155; \ __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \ __ret_155; \ }) #else #define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \ bfloat16x4_t __ret_156; \ bfloat16x4_t __s0_156 = __p0_156; \ bfloat16x4_t __rev0_156; __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \ __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \ __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \ __ret_156; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x8_t __s0 = __p0; \ __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ __ret; \ }) #else #define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x8_t __s0 = __p0; \ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \ bfloat16x8_t __ret_157; \ bfloat16x8_t __s0_157 = __p0_157; \ __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \ __ret_157; \ }) #else #define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \ bfloat16x8_t __ret_158; \ bfloat16x8_t __s0_158 = __p0_158; \ bfloat16x8_t __rev0_158; __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \ __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_158; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \ bfloat16x4_t __ret_159; \ bfloat16x8_t __s0_159 = __p0_159; \ __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \ __ret_159; \ }) #else #define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \ bfloat16x4_t __ret_160; \ bfloat16x8_t 
__s0_160 = __p0_160; \ bfloat16x8_t __rev0_160; __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \ __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \ __ret_160; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else __ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else __ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { bfloat16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else __ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { bfloat16x4_t __ret; bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { bfloat16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x8_t __s0 = __p0; \ __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x8_t __s0 = __p0; \ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x8_t __s0 = __p0; \ __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x4_t __s0 = __p0; \ __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ __ret; \ }) #else #define vget_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x4_t __s0 = __p0; \ bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ bfloat16_t __ret; \ bfloat16x4_t __s0 = __p0; \ __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { bfloat16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else __ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { bfloat16x4_t __ret; bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, 
__rev0, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { bfloat16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_bf16(__p0) __extension__ ({ \ bfloat16x8_t __ret; \ __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \ __ret; \ }) #else #define vld1q_bf16(__p0) __extension__ ({ \ bfloat16x8_t __ret; \ __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_bf16(__p0) __extension__ ({ \ bfloat16x4_t __ret; \ __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \ __ret; \ }) #else #define vld1_bf16(__p0) __extension__ ({ \ bfloat16x4_t __ret; \ __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_bf16(__p0) __extension__ ({ \ bfloat16x8_t __ret; \ __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \ __ret; \ }) #else #define vld1q_dup_bf16(__p0) __extension__ ({ \ bfloat16x8_t __ret; \ __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_dup_bf16(__p0) __extension__ ({ \ bfloat16x4_t __ret; \ __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \ __ret; \ }) #else #define vld1_dup_bf16(__p0) __extension__ ({ \ bfloat16x4_t __ret; \ __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16x8_t __s1 = __p1; \ __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \ __ret; \ }) #else #define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16x8_t __s1 = __p1; \ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16x4_t __s1 = __p1; \ __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \ __ret; \ }) #else #define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16x4_t __s1 = __p1; \ bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_bf16_x2(__p0) __extension__ ({ \ bfloat16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld1q_bf16_x2(__p0) __extension__ ({ \ bfloat16x8x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define 
vld1_bf16_x2(__p0) __extension__ ({ \ bfloat16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld1_bf16_x2(__p0) __extension__ ({ \ bfloat16x4x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_bf16_x3(__p0) __extension__ ({ \ bfloat16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld1q_bf16_x3(__p0) __extension__ ({ \ bfloat16x8x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_bf16_x3(__p0) __extension__ ({ \ bfloat16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld1_bf16_x3(__p0) __extension__ ({ \ bfloat16x4x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_bf16_x4(__p0) __extension__ ({ \ bfloat16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld1q_bf16_x4(__p0) __extension__ ({ \ bfloat16x8x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1_bf16_x4(__p0) __extension__ ({ \ bfloat16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld1_bf16_x4(__p0) __extension__ ({ \ bfloat16x4x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_bf16(__p0) __extension__ ({ \ bfloat16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld2q_bf16(__p0) __extension__ ({ \ bfloat16x8x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_bf16(__p0) __extension__ ({ \ bfloat16x4x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld2_bf16(__p0) __extension__ ({ \ bfloat16x4x2_t __ret; \ 
__builtin_neon_vld2_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_bf16(__p0) __extension__ ({ \ bfloat16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld2q_dup_bf16(__p0) __extension__ ({ \ bfloat16x8x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_dup_bf16(__p0) __extension__ ({ \ bfloat16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld2_dup_bf16(__p0) __extension__ ({ \ bfloat16x4x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x2_t __ret; \ bfloat16x8x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ __ret; \ }) #else #define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x2_t __ret; \ bfloat16x8x2_t __s1 = __p1; \ bfloat16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x2_t __ret; \ bfloat16x4x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ __ret; \ }) #else #define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x2_t __ret; \ bfloat16x4x2_t __s1 = __p1; \ bfloat16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_bf16(__p0) __extension__ ({ \ bfloat16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld3q_bf16(__p0) __extension__ ({ \ bfloat16x8x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_bf16(__p0) 
__extension__ ({ \ bfloat16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld3_bf16(__p0) __extension__ ({ \ bfloat16x4x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_bf16(__p0) __extension__ ({ \ bfloat16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld3q_dup_bf16(__p0) __extension__ ({ \ bfloat16x8x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_dup_bf16(__p0) __extension__ ({ \ bfloat16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld3_dup_bf16(__p0) __extension__ ({ \ bfloat16x4x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x3_t __ret; \ bfloat16x8x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ __ret; \ }) #else #define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x3_t __ret; \ bfloat16x8x3_t __s1 = __p1; \ bfloat16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x3_t __ret; \ bfloat16x4x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ __ret; \ }) #else #define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x3_t __ret; \ bfloat16x4x3_t __s1 = __p1; \ bfloat16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ \ 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_bf16(__p0) __extension__ ({ \ bfloat16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld4q_bf16(__p0) __extension__ ({ \ bfloat16x8x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_bf16(__p0) __extension__ ({ \ bfloat16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld4_bf16(__p0) __extension__ ({ \ bfloat16x4x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_bf16(__p0) __extension__ ({ \ bfloat16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \ __ret; \ }) #else #define vld4q_dup_bf16(__p0) __extension__ ({ \ bfloat16x8x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_dup_bf16(__p0) __extension__ ({ \ bfloat16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \ __ret; \ }) #else #define vld4_dup_bf16(__p0) __extension__ ({ \ bfloat16x4x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x4_t __ret; \ bfloat16x8x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ __ret; \ }) #else #define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x4_t __ret; \ bfloat16x8x4_t __s1 = __p1; \ bfloat16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 
6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x4_t __ret; \ bfloat16x4x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ __ret; \ }) #else #define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x4_t __ret; \ bfloat16x4x4_t __s1 = __p1; \ bfloat16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16_t __s0 = __p0; \ bfloat16x8_t __s1 = __p1; \ __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16_t __s0 = __p0; \ bfloat16x8_t __s1 = __p1; \ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8_t __ret; \ bfloat16_t __s0 = __p0; \ bfloat16x8_t __s1 = __p1; \ __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16_t __s0 = __p0; \ bfloat16x4_t __s1 = __p1; \ __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ __ret; \ }) #else #define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16_t __s0 = __p0; \ bfloat16x4_t __s1 = __p1; \ bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define 
__noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4_t __ret; \ bfloat16_t __s0 = __p0; \ bfloat16x4_t __s1 = __p1; \ __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \ }) #else #define vst1q_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8_t __s1 = __p1; \ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \ }) #else #define vst1_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4_t __s1 = __p1; \ bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \ }) #else #define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8_t __s1 = __p1; \ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \ }) #else #define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4_t __s1 = __p1; \ bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ bfloat16x8x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ }) #else #define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ bfloat16x8x2_t __s1 = __p1; \ bfloat16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ bfloat16x4x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ }) #else #define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ bfloat16x4x2_t __s1 = __p1; \ bfloat16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ bfloat16x8x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ }) #else #define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ bfloat16x8x3_t __s1 = __p1; \ bfloat16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ bfloat16x4x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ }) #else #define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ bfloat16x4x3_t __s1 = __p1; \ bfloat16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ bfloat16x8x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ }) #else #define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ bfloat16x8x4_t __s1 = __p1; \ bfloat16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ bfloat16x4x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ }) #else #define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ bfloat16x4x4_t __s1 = __p1; \ bfloat16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ }) #else #define vst2q_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8x2_t __s1 = __p1; \ bfloat16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ }) #else #define vst2_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4x2_t __s1 = __p1; \ bfloat16x4x2_t __rev1; \ 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ }) #else #define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x2_t __s1 = __p1; \ bfloat16x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ }) #else #define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x2_t __s1 = __p1; \ bfloat16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ }) #else #define vst3q_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8x3_t __s1 = __p1; \ bfloat16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ }) #else #define vst3_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4x3_t __s1 = __p1; \ bfloat16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ }) #else #define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x3_t __s1 = __p1; \ bfloat16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ 
__builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ }) #else #define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x3_t __s1 = __p1; \ bfloat16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ }) #else #define vst4q_bf16(__p0, __p1) __extension__ ({ \ bfloat16x8x4_t __s1 = __p1; \ bfloat16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ }) #else #define vst4_bf16(__p0, __p1) __extension__ ({ \ bfloat16x4x4_t __s1 = __p1; \ bfloat16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ }) #else #define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x8x4_t __s1 = __p1; \ bfloat16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ 
bfloat16x4x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ }) #else #define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ bfloat16x4x4_t __s1 = __p1; \ bfloat16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ }) #endif #endif #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11); return __ret; } #else __ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { bfloat16x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) { bfloat16x4_t __ret; __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { bfloat16x4_t __ret; __ret = __a32_vcvt_bf16_f32(__p0); return __ret; } #else __ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { bfloat16x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap___a32_vcvt_bf16_f32(__rev0); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { bfloat16x8_t __ret; __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0)); return __ret; } #else __ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { bfloat16x8_t __ret; bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { bfloat16x8_t __ret; __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0)); return __ret; } #else __ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { bfloat16x8_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #endif #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43); return __ret; } #else __ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t 
__p0) { bfloat16x8_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ bfloat16x8_t __ret_161; \ bfloat16x8_t __s0_161 = __p0_161; \ bfloat16x4_t __s2_161 = __p2_161; \ __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \ __ret_161; \ }) #else #define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ bfloat16x8_t __ret_162; \ bfloat16x8_t __s0_162 = __p0_162; \ bfloat16x4_t __s2_162 = __p2_162; \ bfloat16x8_t __rev0_162; __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x4_t __rev2_162; __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \ __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \ __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_162; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ bfloat16x4_t __ret_163; \ bfloat16x4_t __s0_163 = __p0_163; \ bfloat16x4_t __s2_163 = __p2_163; \ __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \ __ret_163; \ }) #else #define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ bfloat16x4_t __ret_164; \ bfloat16x4_t __s0_164 = __p0_164; \ bfloat16x4_t __s2_164 = __p2_164; \ bfloat16x4_t __rev0_164; __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \ bfloat16x4_t __rev2_164; __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \ __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \ __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \ __ret_164; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ bfloat16x8_t __ret_165; \ bfloat16x8_t __s0_165 = __p0_165; \ bfloat16x8_t __s2_165 = __p2_165; \ __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \ __ret_165; \ }) #else #define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ bfloat16x8_t __ret_166; \ bfloat16x8_t __s0_166 = __p0_166; \ bfloat16x8_t __s2_166 = __p2_166; \ bfloat16x8_t __rev0_166; __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x8_t __rev2_166; __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \ __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_166; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ bfloat16x4_t __ret_167; \ bfloat16x4_t __s0_167 = __p0_167; \ bfloat16x8_t __s2_167 = __p2_167; \ __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \ __ret_167; \ }) #else #define 
vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ bfloat16x4_t __ret_168; \ bfloat16x4_t __s0_168 = __p0_168; \ bfloat16x8_t __s2_168 = __p2_168; \ bfloat16x4_t __rev0_168; __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \ bfloat16x8_t __rev2_168; __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \ __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \ __ret_168; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { bfloat16x4_t __ret; __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); return __ret; } #else __ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { bfloat16x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { bfloat16x8_t __ret; __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43); return __ret; } #else __ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { bfloat16x8_t __ret; bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { bfloat16x8_t __ret; __ret = __a64_vcvtq_low_bf16_f32(__p0); return __ret; } #else __ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { bfloat16x8_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #endif #if defined(__ARM_FEATURE_COMPLEX) #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai 
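/*
 * The __ARM_FEATURE_COMPLEX intrinsics in this block treat each even/odd f32
 * lane pair as one complex number (real, imag).  vcadd_rot90/vcadd_rot270 add
 * the second operand pre-rotated by 90/270 degrees in the complex plane
 * (i.e. multiplied by +i or -i), and the vcmla* family accumulates partial
 * complex products; the rot0 and rot90 steps together form a full complex
 * multiply-accumulate.  Illustrative sketch only, not part of the generated
 * header; the helper name is arbitrary:
 *
 *   // acc += a * b for the single complex float held in each 64-bit vector
 *   float32x2_t cmla1(float32x2_t acc, float32x2_t a, float32x2_t b) {
 *     acc = vcmla_f32(acc, a, b);        // real-part partial products
 *     acc = vcmla_rot90_f32(acc, a, b);  // imaginary-part partial products
 *     return acc;
 *   }
 */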
float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ float32x2_t __ret_169; \ float32x2_t __s0_169 = __p0_169; \ float32x2_t __s1_169 = __p1_169; \ float32x2_t __s2_169 = __p2_169; \ float32x2_t __reint_169 = __s2_169; \ uint64x1_t __reint1_169 
= (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \ __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \ __ret_169; \ }) #else #define vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ float32x2_t __ret_170; \ float32x2_t __s0_170 = __p0_170; \ float32x2_t __s1_170 = __p1_170; \ float32x2_t __s2_170 = __p2_170; \ float32x2_t __rev0_170; __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \ float32x2_t __rev1_170; __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \ float32x2_t __rev2_170; __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \ float32x2_t __reint_170 = __rev2_170; \ uint64x1_t __reint1_170 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \ __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \ __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \ __ret_170; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ float32x4_t __ret_171; \ float32x4_t __s0_171 = __p0_171; \ float32x4_t __s1_171 = __p1_171; \ float32x2_t __s2_171 = __p2_171; \ float32x2_t __reint_171 = __s2_171; \ uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \ __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \ __ret_171; \ }) #else #define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ float32x4_t __ret_172; \ float32x4_t __s0_172 = __p0_172; \ float32x4_t __s1_172 = __p1_172; \ float32x2_t __s2_172 = __p2_172; \ float32x4_t __rev0_172; __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \ float32x4_t __rev1_172; __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \ float32x2_t __rev2_172; __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \ float32x2_t __reint_172 = __rev2_172; \ uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \ __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \ __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \ __ret_172; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ float32x2_t __ret_173; \ float32x2_t __s0_173 = __p0_173; \ float32x2_t __s1_173 = __p1_173; \ float32x4_t __s2_173 = __p2_173; \ float32x4_t __reint_173 = __s2_173; \ uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \ __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \ __ret_173; \ }) #else #define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ float32x2_t __ret_174; \ float32x2_t __s0_174 = __p0_174; \ float32x2_t __s1_174 = __p1_174; \ float32x4_t __s2_174 = __p2_174; \ float32x2_t __rev0_174; __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \ float32x2_t __rev1_174; __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \ float32x4_t __rev2_174; __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \ float32x4_t __reint_174 = __rev2_174; \ uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \ __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, 
*(float32x2_t *) &__reint1_174); \ __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \ __ret_174; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ float32x4_t __ret_175; \ float32x4_t __s0_175 = __p0_175; \ float32x4_t __s1_175 = __p1_175; \ float32x4_t __s2_175 = __p2_175; \ float32x4_t __reint_175 = __s2_175; \ uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \ __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) &__reint1_175); \ __ret_175; \ }) #else #define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ float32x4_t __ret_176; \ float32x4_t __s0_176 = __p0_176; \ float32x4_t __s1_176 = __p1_176; \ float32x4_t __s2_176 = __p2_176; \ float32x4_t __rev0_176; __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \ float32x4_t __rev1_176; __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \ float32x4_t __rev2_176; __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \ float32x4_t __reint_176 = __rev2_176; \ uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \ __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \ __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \ __ret_176; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) 
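/*
 * Note on the vcmla*_f32 intrinsics defined around this point and their
 * _rot90/_rot180/_rot270 variants: each float32x2_t/float32x4_t is treated
 * as one or two interleaved (real, imaginary) pairs, and the intrinsics
 * typically lower to the Armv8.3-A FCMLA instruction with the named
 * rotation.  A rotation-0 step accumulates the partial products involving
 * the real part of the first multiplicand, and a rotation-90 step adds the
 * products involving its imaginary part, so issuing both gives a full
 * complex multiply-accumulate.  Illustrative sketch only (the helper name
 * is hypothetical, not part of this header):
 *
 *   static inline float32x4_t cmla_full_f32(float32x4_t acc,
 *                                           float32x4_t a, float32x4_t b) {
 *     acc = vcmlaq_f32(acc, a, b);        // acc += Re(a) partial products
 *     acc = vcmlaq_rot90_f32(acc, a, b);  // acc += Im(a) partial products
 *     return acc;                         // acc += a * b per complex pair
 *   }
 */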
__builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ float32x2_t __ret_177; \ float32x2_t __s0_177 = __p0_177; \ float32x2_t __s1_177 = __p1_177; \ float32x2_t __s2_177 = __p2_177; \ float32x2_t __reint_177 = __s2_177; \ uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \ __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \ __ret_177; \ }) #else #define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ float32x2_t __ret_178; \ float32x2_t __s0_178 = __p0_178; \ float32x2_t __s1_178 = __p1_178; \ float32x2_t __s2_178 = __p2_178; \ float32x2_t __rev0_178; __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \ float32x2_t __rev1_178; __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \ float32x2_t __rev2_178; __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \ float32x2_t __reint_178 = __rev2_178; \ uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \ __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \ __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \ __ret_178; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ float32x4_t __ret_179; \ float32x4_t __s0_179 = __p0_179; \ float32x4_t __s1_179 = __p1_179; \ float32x2_t __s2_179 = __p2_179; \ float32x2_t __reint_179 = __s2_179; \ uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \ __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \ __ret_179; \ }) #else #define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ float32x4_t __ret_180; \ float32x4_t __s0_180 = __p0_180; \ float32x4_t __s1_180 = __p1_180; \ float32x2_t __s2_180 = __p2_180; \ float32x4_t __rev0_180; __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \ float32x4_t __rev1_180; __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \ float32x2_t __rev2_180; __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \ float32x2_t __reint_180 = __rev2_180; \ uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \ __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \ __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \ __ret_180; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ float32x2_t __ret_181; \ float32x2_t __s0_181 = __p0_181; \ float32x2_t __s1_181 = __p1_181; \ float32x4_t __s2_181 = __p2_181; \ float32x4_t __reint_181 = __s2_181; \ uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \ __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \ __ret_181; \ }) #else #define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ float32x2_t __ret_182; \ float32x2_t __s0_182 = __p0_182; \ float32x2_t __s1_182 = __p1_182; \ float32x4_t __s2_182 = __p2_182; \ 
float32x2_t __rev0_182; __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \ float32x2_t __rev1_182; __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \ float32x4_t __rev2_182; __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \ float32x4_t __reint_182 = __rev2_182; \ uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \ __ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \ __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \ __ret_182; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ float32x4_t __ret_183; \ float32x4_t __s0_183 = __p0_183; \ float32x4_t __s1_183 = __p1_183; \ float32x4_t __s2_183 = __p2_183; \ float32x4_t __reint_183 = __s2_183; \ uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \ __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \ __ret_183; \ }) #else #define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ float32x4_t __ret_184; \ float32x4_t __s0_184 = __p0_184; \ float32x4_t __s1_184 = __p1_184; \ float32x4_t __s2_184 = __p2_184; \ float32x4_t __rev0_184; __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \ float32x4_t __rev1_184; __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \ float32x4_t __rev2_184; __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \ float32x4_t __reint_184 = __rev2_184; \ uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \ __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \ __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \ __ret_184; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ float32x2_t __ret_185; \ float32x2_t __s0_185 = __p0_185; \ float32x2_t __s1_185 = __p1_185; \ float32x2_t __s2_185 = __p2_185; \ float32x2_t __reint_185 = __s2_185; \ uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \ __ret_185; \ }) #else #define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ float32x2_t __ret_186; \ float32x2_t __s0_186 = __p0_186; \ float32x2_t __s1_186 = __p1_186; \ float32x2_t __s2_186 = __p2_186; \ float32x2_t __rev0_186; __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \ float32x2_t __rev1_186; __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \ float32x2_t __rev2_186; __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \ float32x2_t __reint_186 = __rev2_186; \ uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \ __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \ __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \ __ret_186; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ float32x4_t __ret_187; \ float32x4_t __s0_187 = __p0_187; \ float32x4_t __s1_187 = __p1_187; \ float32x2_t __s2_187 = __p2_187; \ float32x2_t __reint_187 = __s2_187; \ uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \ __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \ __ret_187; \ }) #else #define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ float32x4_t __ret_188; \ float32x4_t __s0_188 = __p0_188; \ float32x4_t __s1_188 = __p1_188; \ float32x2_t __s2_188 = __p2_188; \ float32x4_t __rev0_188; __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \ float32x4_t __rev1_188; __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \ float32x2_t __rev2_188; __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \ float32x2_t __reint_188 = __rev2_188; \ uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \ __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \ __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \ __ret_188; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ float32x2_t __ret_189; \ float32x2_t __s0_189 = __p0_189; \ float32x2_t __s1_189 = __p1_189; \ float32x4_t __s2_189 = 
__p2_189; \ float32x4_t __reint_189 = __s2_189; \ uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \ __ret_189; \ }) #else #define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ float32x2_t __ret_190; \ float32x2_t __s0_190 = __p0_190; \ float32x2_t __s1_190 = __p1_190; \ float32x4_t __s2_190 = __p2_190; \ float32x2_t __rev0_190; __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \ float32x2_t __rev1_190; __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \ float32x4_t __rev2_190; __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \ float32x4_t __reint_190 = __rev2_190; \ uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \ __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \ __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \ __ret_190; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ float32x4_t __ret_191; \ float32x4_t __s0_191 = __p0_191; \ float32x4_t __s1_191 = __p1_191; \ float32x4_t __s2_191 = __p2_191; \ float32x4_t __reint_191 = __s2_191; \ uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \ __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \ __ret_191; \ }) #else #define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ float32x4_t __ret_192; \ float32x4_t __s0_192 = __p0_192; \ float32x4_t __s1_192 = __p1_192; \ float32x4_t __s2_192 = __p2_192; \ float32x4_t __rev0_192; __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \ float32x4_t __rev1_192; __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \ float32x4_t __rev2_192; __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \ float32x4_t __reint_192 = __rev2_192; \ uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \ __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \ __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \ __ret_192; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 
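/*
 * Pattern used throughout this section: on little-endian targets the
 * intrinsic forwards its arguments directly to the generic
 * __builtin_neon_* builtin, whose trailing integer argument encodes the
 * operand vector type (e.g. 41 for float32x4_t, 9 for float32x2_t here).
 * On big-endian targets the wrapper first reverses the lanes of each
 * vector argument with __builtin_shufflevector, calls the builtin, and
 * reverses the result back, so lane numbering seen by user code matches
 * the little-endian view.  The __noswap_* forms are the unreversed
 * variants that the *_lane_* / *_laneq_* macros call after performing
 * their own lane reversal.
 */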
(int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ float32x2_t __ret_193; \ float32x2_t __s0_193 = __p0_193; \ float32x2_t __s1_193 = __p1_193; \ float32x2_t __s2_193 = __p2_193; \ float32x2_t __reint_193 = __s2_193; \ uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \ __ret_193; \ }) #else #define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ float32x2_t __ret_194; \ float32x2_t __s0_194 = __p0_194; \ float32x2_t __s1_194 = __p1_194; \ float32x2_t __s2_194 = __p2_194; \ float32x2_t __rev0_194; __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \ float32x2_t __rev1_194; __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \ float32x2_t __rev2_194; __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \ float32x2_t __reint_194 = __rev2_194; \ uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \ __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \ __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \ __ret_194; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ float32x4_t __ret_195; \ float32x4_t __s0_195 = __p0_195; \ float32x4_t __s1_195 = __p1_195; \ float32x2_t __s2_195 = __p2_195; \ float32x2_t __reint_195 = __s2_195; \ uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \ __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \ __ret_195; \ }) #else #define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ float32x4_t __ret_196; \ float32x4_t __s0_196 = __p0_196; \ float32x4_t __s1_196 = __p1_196; \ float32x2_t __s2_196 = __p2_196; \ float32x4_t __rev0_196; __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \ float32x4_t __rev1_196; __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \ float32x2_t __rev2_196; __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \ float32x2_t __reint_196 = __rev2_196; \ uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) 
&__reint_196, __p3_196)}; \ __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \ __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \ __ret_196; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ float32x2_t __ret_197; \ float32x2_t __s0_197 = __p0_197; \ float32x2_t __s1_197 = __p1_197; \ float32x4_t __s2_197 = __p2_197; \ float32x4_t __reint_197 = __s2_197; \ uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \ __ret_197; \ }) #else #define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ float32x2_t __ret_198; \ float32x2_t __s0_198 = __p0_198; \ float32x2_t __s1_198 = __p1_198; \ float32x4_t __s2_198 = __p2_198; \ float32x2_t __rev0_198; __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \ float32x2_t __rev1_198; __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \ float32x4_t __rev2_198; __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \ float32x4_t __reint_198 = __rev2_198; \ uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \ __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \ __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \ __ret_198; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \ float32x4_t __ret_199; \ float32x4_t __s0_199 = __p0_199; \ float32x4_t __s1_199 = __p1_199; \ float32x4_t __s2_199 = __p2_199; \ float32x4_t __reint_199 = __s2_199; \ uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \ __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \ __ret_199; \ }) #else #define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ float32x4_t __ret_200; \ float32x4_t __s0_200 = __p0_200; \ float32x4_t __s1_200 = __p1_200; \ float32x4_t __s2_200 = __p2_200; \ float32x4_t __rev0_200; __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \ float32x4_t __rev1_200; __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \ float32x4_t __rev2_200; __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \ float32x4_t __reint_200 = __rev2_200; \ uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \ __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \ __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \ __ret_200; \ }) #endif #endif #if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 
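/*
 * This block, guarded by __ARM_FEATURE_COMPLEX together with
 * __ARM_FEATURE_FP16_VECTOR_ARITHMETIC, mirrors the float32 complex
 * intrinsics for float16x4_t/float16x8_t.  For each (real, imaginary)
 * pair, vcadd_rot90_* adds the second operand rotated by +90 degrees in
 * the complex plane (roughly a + i*b) and vcadd_rot270_* the -90 degree
 * rotation (roughly a - i*b); the vcmla_* rotations follow the same
 * scheme as the float32 versions above.
 */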
0); __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else __ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else __ai float16x4_t vcmla_f16(float16x4_t __p0, 
float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ float16x4_t __ret_201; \ float16x4_t __s0_201 = __p0_201; \ float16x4_t __s1_201 = __p1_201; \ float16x4_t __s2_201 = __p2_201; \ float16x4_t __reint_201 = __s2_201; \ uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \ __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \ __ret_201; \ }) #else #define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ float16x4_t __ret_202; \ float16x4_t __s0_202 = __p0_202; \ float16x4_t __s1_202 = __p1_202; \ float16x4_t __s2_202 = __p2_202; \ float16x4_t __rev0_202; __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \ float16x4_t __rev1_202; __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \ float16x4_t __rev2_202; __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \ float16x4_t __reint_202 = __rev2_202; \ uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \ __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \ __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \ __ret_202; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \ float16x8_t __ret_203; \ float16x8_t __s0_203 = __p0_203; \ float16x8_t __s1_203 = __p1_203; \ float16x4_t __s2_203 = __p2_203; \ float16x4_t __reint_203 = __s2_203; \ uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \ __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \ __ret_203; \ }) #else #define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \ float16x8_t __ret_204; \ float16x8_t __s0_204 = __p0_204; \ float16x8_t __s1_204 = __p1_204; \ float16x4_t __s2_204 = __p2_204; \ float16x8_t __rev0_204; __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_204; __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_204; __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \ float16x4_t __reint_204 = __rev2_204; \ uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, 
__p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \ __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \ __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_204; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \ float16x4_t __ret_205; \ float16x4_t __s0_205 = __p0_205; \ float16x4_t __s1_205 = __p1_205; \ float16x8_t __s2_205 = __p2_205; \ float16x8_t __reint_205 = __s2_205; \ uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \ __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \ __ret_205; \ }) #else #define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \ float16x4_t __ret_206; \ float16x4_t __s0_206 = __p0_206; \ float16x4_t __s1_206 = __p1_206; \ float16x8_t __s2_206 = __p2_206; \ float16x4_t __rev0_206; __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \ float16x4_t __rev1_206; __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \ float16x8_t __rev2_206; __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_206 = __rev2_206; \ uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \ __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \ __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \ __ret_206; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ float16x8_t __ret_207; \ float16x8_t __s0_207 = __p0_207; \ float16x8_t __s1_207 = __p1_207; \ float16x8_t __s2_207 = __p2_207; \ float16x8_t __reint_207 = __s2_207; \ uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \ __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \ __ret_207; \ }) #else #define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ float16x8_t __ret_208; \ float16x8_t __s0_208 = __p0_208; \ float16x8_t __s1_208 = __p1_208; \ float16x8_t __s2_208 = __p2_208; \ float16x8_t __rev0_208; __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_208; __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2_208; __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_208 = __rev2_208; \ uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \ __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \ __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_208; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t 
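/*
 * The *_lane_* / *_laneq_* macros in this float16 block (and their float32
 * counterparts earlier) broadcast one complex value from the last vector
 * argument: a (real, imaginary) pair is reinterpreted as a single wider
 * integer lane (uint32_t for float16 pairs, uint64_t for float32 pairs),
 * the lane selected by the constant index is duplicated via repeated
 * vget_lane/vgetq_lane calls, and the result is reinterpreted back before
 * the base vcmla* intrinsic is invoked.
 */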
__p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else __ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else __ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ float16x4_t __ret_209; \ float16x4_t __s0_209 = __p0_209; \ float16x4_t __s1_209 = __p1_209; \ float16x4_t __s2_209 = __p2_209; \ float16x4_t __reint_209 = __s2_209; \ uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \ __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \ __ret_209; \ }) #else #define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ float16x4_t __ret_210; \ float16x4_t __s0_210 = __p0_210; \ float16x4_t __s1_210 = __p1_210; \ float16x4_t __s2_210 = __p2_210; \ float16x4_t __rev0_210; __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \ float16x4_t __rev1_210; __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \ float16x4_t __rev2_210; __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \ float16x4_t __reint_210 = __rev2_210; \ uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \ __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \ __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \ __ret_210; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, 
__p2_211, __p3_211) __extension__ ({ \ float16x8_t __ret_211; \ float16x8_t __s0_211 = __p0_211; \ float16x8_t __s1_211 = __p1_211; \ float16x4_t __s2_211 = __p2_211; \ float16x4_t __reint_211 = __s2_211; \ uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \ __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \ __ret_211; \ }) #else #define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ float16x8_t __ret_212; \ float16x8_t __s0_212 = __p0_212; \ float16x8_t __s1_212 = __p1_212; \ float16x4_t __s2_212 = __p2_212; \ float16x8_t __rev0_212; __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_212; __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_212; __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \ float16x4_t __reint_212 = __rev2_212; \ uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \ __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \ __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_212; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ float16x4_t __ret_213; \ float16x4_t __s0_213 = __p0_213; \ float16x4_t __s1_213 = __p1_213; \ float16x8_t __s2_213 = __p2_213; \ float16x8_t __reint_213 = __s2_213; \ uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \ __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \ __ret_213; \ }) #else #define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \ float16x4_t __ret_214; \ float16x4_t __s0_214 = __p0_214; \ float16x4_t __s1_214 = __p1_214; \ float16x8_t __s2_214 = __p2_214; \ float16x4_t __rev0_214; __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \ float16x4_t __rev1_214; __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \ float16x8_t __rev2_214; __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_214 = __rev2_214; \ uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \ __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \ __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \ __ret_214; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \ float16x8_t __ret_215; \ float16x8_t __s0_215 = __p0_215; \ float16x8_t __s1_215 = __p1_215; \ float16x8_t __s2_215 = __p2_215; \ float16x8_t __reint_215 = __s2_215; \ uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, 
__p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \ __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \ __ret_215; \ }) #else #define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \ float16x8_t __ret_216; \ float16x8_t __s0_216 = __p0_216; \ float16x8_t __s1_216 = __p1_216; \ float16x8_t __s2_216 = __p2_216; \ float16x8_t __rev0_216; __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_216; __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2_216; __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_216 = __rev2_216; \ uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \ __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \ __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_216; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else __ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else __ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) 
__extension__ ({ \ float16x4_t __ret_217; \ float16x4_t __s0_217 = __p0_217; \ float16x4_t __s1_217 = __p1_217; \ float16x4_t __s2_217 = __p2_217; \ float16x4_t __reint_217 = __s2_217; \ uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \ __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \ __ret_217; \ }) #else #define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \ float16x4_t __ret_218; \ float16x4_t __s0_218 = __p0_218; \ float16x4_t __s1_218 = __p1_218; \ float16x4_t __s2_218 = __p2_218; \ float16x4_t __rev0_218; __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \ float16x4_t __rev1_218; __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \ float16x4_t __rev2_218; __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \ float16x4_t __reint_218 = __rev2_218; \ uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \ __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \ __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \ __ret_218; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \ float16x8_t __ret_219; \ float16x8_t __s0_219 = __p0_219; \ float16x8_t __s1_219 = __p1_219; \ float16x4_t __s2_219 = __p2_219; \ float16x4_t __reint_219 = __s2_219; \ uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \ __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \ __ret_219; \ }) #else #define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \ float16x8_t __ret_220; \ float16x8_t __s0_220 = __p0_220; \ float16x8_t __s1_220 = __p1_220; \ float16x4_t __s2_220 = __p2_220; \ float16x8_t __rev0_220; __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_220; __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_220; __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \ float16x4_t __reint_220 = __rev2_220; \ uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \ __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \ __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_220; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \ float16x4_t __ret_221; \ float16x4_t __s0_221 = __p0_221; \ float16x4_t __s1_221 = __p1_221; \ float16x8_t __s2_221 = __p2_221; \ float16x8_t __reint_221 = __s2_221; \ uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \ __ret_221 = 
vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \ __ret_221; \ }) #else #define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \ float16x4_t __ret_222; \ float16x4_t __s0_222 = __p0_222; \ float16x4_t __s1_222 = __p1_222; \ float16x8_t __s2_222 = __p2_222; \ float16x4_t __rev0_222; __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \ float16x4_t __rev1_222; __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \ float16x8_t __rev2_222; __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_222 = __rev2_222; \ uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \ __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \ __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \ __ret_222; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \ float16x8_t __ret_223; \ float16x8_t __s0_223 = __p0_223; \ float16x8_t __s1_223 = __p1_223; \ float16x8_t __s2_223 = __p2_223; \ float16x8_t __reint_223 = __s2_223; \ uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \ __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \ __ret_223; \ }) #else #define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \ float16x8_t __ret_224; \ float16x8_t __s0_224 = __p0_224; \ float16x8_t __s1_224 = __p1_224; \ float16x8_t __s2_224 = __p2_224; \ float16x8_t __rev0_224; __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_224; __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2_224; __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_224 = __rev2_224; \ uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \ __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \ __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_224; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else __ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = 
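/*
 * Illustrative float16 usage, analogous to the float32 sketch earlier
 * (helper name hypothetical; assumes __ARM_FEATURE_FP16_VECTOR_ARITHMETIC):
 *
 *   static inline float16x8_t cmla_full_f16(float16x8_t acc,
 *                                           float16x8_t a, float16x8_t b) {
 *     acc = vcmlaq_f16(acc, a, b);        // real-part partial products
 *     acc = vcmlaq_rot90_f16(acc, a, b);  // imaginary-part partial products
 *     return acc;                         // acc += a * b for each pair
 *   }
 *
 * The rot180/rot270 forms supply the negated partial products, which is
 * what lets subtracting or conjugated variants be composed the same way.
 */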
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else __ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \ float16x4_t __ret_225; \ float16x4_t __s0_225 = __p0_225; \ float16x4_t __s1_225 = __p1_225; \ float16x4_t __s2_225 = __p2_225; \ float16x4_t __reint_225 = __s2_225; \ uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \ __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \ __ret_225; \ }) #else #define vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \ float16x4_t __ret_226; \ float16x4_t __s0_226 = __p0_226; \ float16x4_t __s1_226 = __p1_226; \ float16x4_t __s2_226 = __p2_226; \ float16x4_t __rev0_226; __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \ float16x4_t __rev1_226; __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \ float16x4_t __rev2_226; __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \ float16x4_t __reint_226 = __rev2_226; \ uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \ __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \ __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \ __ret_226; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \ float16x8_t __ret_227; \ float16x8_t __s0_227 = __p0_227; \ float16x8_t __s1_227 = __p1_227; \ float16x4_t __s2_227 = __p2_227; \ float16x4_t __reint_227 = __s2_227; \ uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \ __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \ __ret_227; \ }) #else #define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \ float16x8_t 
__ret_228; \ float16x8_t __s0_228 = __p0_228; \ float16x8_t __s1_228 = __p1_228; \ float16x4_t __s2_228 = __p2_228; \ float16x8_t __rev0_228; __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_228; __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_228; __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \ float16x4_t __reint_228 = __rev2_228; \ uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \ __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \ __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_228; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \ float16x4_t __ret_229; \ float16x4_t __s0_229 = __p0_229; \ float16x4_t __s1_229 = __p1_229; \ float16x8_t __s2_229 = __p2_229; \ float16x8_t __reint_229 = __s2_229; \ uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \ __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \ __ret_229; \ }) #else #define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ float16x4_t __ret_230; \ float16x4_t __s0_230 = __p0_230; \ float16x4_t __s1_230 = __p1_230; \ float16x8_t __s2_230 = __p2_230; \ float16x4_t __rev0_230; __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \ float16x4_t __rev1_230; __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \ float16x8_t __rev2_230; __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_230 = __rev2_230; \ uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \ __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \ __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \ __ret_230; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ float16x8_t __ret_231; \ float16x8_t __s0_231 = __p0_231; \ float16x8_t __s1_231 = __p1_231; \ float16x8_t __s2_231 = __p2_231; \ float16x8_t __reint_231 = __s2_231; \ uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \ __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \ __ret_231; \ }) #else #define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ float16x8_t __ret_232; \ float16x8_t __s0_232 = __p0_232; \ float16x8_t __s1_232 = __p1_232; \ float16x8_t __s2_232 = __p2_232; \ float16x8_t __rev0_232; __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_232; __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t 
__rev2_232; __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_232 = __rev2_232; \ uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \ __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \ __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_232; \ }) #endif #endif #if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else __ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif __ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } #define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ float64x1_t __ret_233; \ float64x1_t __s0_233 = __p0_233; \ float64x1_t __s1_233 = __p1_233; \ float64x1_t __s2_233 = __p2_233; \ float64x1_t __reint_233 = __s2_233; \ uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), 
vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \ __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \ __ret_233; \ }) #ifdef __LITTLE_ENDIAN__ #define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ float64x2_t __ret_234; \ float64x2_t __s0_234 = __p0_234; \ float64x2_t __s1_234 = __p1_234; \ float64x1_t __s2_234 = __p2_234; \ float64x1_t __reint_234 = __s2_234; \ uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \ __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \ __ret_234; \ }) #else #define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \ float64x2_t __ret_235; \ float64x2_t __s0_235 = __p0_235; \ float64x2_t __s1_235 = __p1_235; \ float64x1_t __s2_235 = __p2_235; \ float64x2_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \ float64x2_t __rev1_235; __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \ float64x1_t __reint_235 = __s2_235; \ uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \ __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \ __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \ __ret_235; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \ float64x1_t __ret_236; \ float64x1_t __s0_236 = __p0_236; \ float64x1_t __s1_236 = __p1_236; \ float64x2_t __s2_236 = __p2_236; \ float64x2_t __reint_236 = __s2_236; \ uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \ __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \ __ret_236; \ }) #else #define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ float64x1_t __ret_237; \ float64x1_t __s0_237 = __p0_237; \ float64x1_t __s1_237 = __p1_237; \ float64x2_t __s2_237 = __p2_237; \ float64x2_t __rev2_237; __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \ float64x2_t __reint_237 = __rev2_237; \ uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \ __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \ __ret_237; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ float64x2_t __ret_238; \ float64x2_t __s0_238 = __p0_238; \ float64x2_t __s1_238 = __p1_238; \ float64x2_t __s2_238 = __p2_238; \ float64x2_t __reint_238 = __s2_238; \ uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \ __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \ __ret_238; \ }) #else #define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ float64x2_t __ret_239; \ float64x2_t __s0_239 = __p0_239; \ float64x2_t __s1_239 = __p1_239; \ float64x2_t __s2_239 = __p2_239; \ float64x2_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \ float64x2_t __rev1_239; __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \ float64x2_t 
__rev2_239; __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \ float64x2_t __reint_239 = __rev2_239; \ uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \ __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \ __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \ __ret_239; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else __ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif __ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } #define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ float64x1_t __ret_240; \ float64x1_t __s0_240 = __p0_240; \ float64x1_t __s1_240 = __p1_240; \ float64x1_t __s2_240 = __p2_240; \ float64x1_t __reint_240 = __s2_240; \ uint64x2_t __reint1_240 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \ __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \ __ret_240; \ }) #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \ float64x2_t __ret_241; \ float64x2_t __s0_241 = __p0_241; \ float64x2_t __s1_241 = __p1_241; \ float64x1_t __s2_241 = __p2_241; \ float64x1_t __reint_241 = __s2_241; \ uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \ __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \ __ret_241; \ }) #else #define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \ float64x2_t __ret_242; \ float64x2_t __s0_242 = __p0_242; \ float64x2_t __s1_242 = __p1_242; \ float64x1_t __s2_242 = __p2_242; \ float64x2_t __rev0_242; __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \ float64x2_t __rev1_242; __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \ float64x1_t __reint_242 = __s2_242; \ uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \ __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \ __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 
1, 0); \ __ret_242; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \ float64x1_t __ret_243; \ float64x1_t __s0_243 = __p0_243; \ float64x1_t __s1_243 = __p1_243; \ float64x2_t __s2_243 = __p2_243; \ float64x2_t __reint_243 = __s2_243; \ uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \ __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \ __ret_243; \ }) #else #define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \ float64x1_t __ret_244; \ float64x1_t __s0_244 = __p0_244; \ float64x1_t __s1_244 = __p1_244; \ float64x2_t __s2_244 = __p2_244; \ float64x2_t __rev2_244; __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \ float64x2_t __reint_244 = __rev2_244; \ uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \ __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \ __ret_244; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \ float64x2_t __ret_245; \ float64x2_t __s0_245 = __p0_245; \ float64x2_t __s1_245 = __p1_245; \ float64x2_t __s2_245 = __p2_245; \ float64x2_t __reint_245 = __s2_245; \ uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \ __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \ __ret_245; \ }) #else #define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \ float64x2_t __ret_246; \ float64x2_t __s0_246 = __p0_246; \ float64x2_t __s1_246 = __p1_246; \ float64x2_t __s2_246 = __p2_246; \ float64x2_t __rev0_246; __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \ float64x2_t __rev1_246; __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \ float64x2_t __rev2_246; __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \ float64x2_t __reint_246 = __rev2_246; \ uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \ __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \ __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \ __ret_246; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else __ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = 
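/*
 * AArch64-only float64 complex arithmetic: vcaddq_rot90/rot270_f64 add the
 * second operand rotated in the complex plane, and vcmlaq_rot{90,180,270}_f64
 * provide the rotated partial-product steps of a complex multiply-accumulate
 * on interleaved (real, imaginary) doubles.  The _lane/_laneq macros
 * reinterpret the float64 data as uint64 lanes so a single complex pair can
 * be duplicated across the vector before accumulating.
 *
 * Illustrative sketch (not part of the generated header):
 *
 *   float64x2_t cmac_f64(float64x2_t acc, float64x2_t a, float64x2_t b) {
 *     acc = vcmlaq_f64(acc, a, b);        // rotation 0
 *     acc = vcmlaq_rot90_f64(acc, a, b);  // rotation 90: acc += a * b
 *     return acc;
 *   }
 */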
(float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif __ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } #define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \ float64x1_t __ret_247; \ float64x1_t __s0_247 = __p0_247; \ float64x1_t __s1_247 = __p1_247; \ float64x1_t __s2_247 = __p2_247; \ float64x1_t __reint_247 = __s2_247; \ uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \ __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \ __ret_247; \ }) #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \ float64x2_t __ret_248; \ float64x2_t __s0_248 = __p0_248; \ float64x2_t __s1_248 = __p1_248; \ float64x1_t __s2_248 = __p2_248; \ float64x1_t __reint_248 = __s2_248; \ uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \ __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \ __ret_248; \ }) #else #define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \ float64x2_t __ret_249; \ float64x2_t __s0_249 = __p0_249; \ float64x2_t __s1_249 = __p1_249; \ float64x1_t __s2_249 = __p2_249; \ float64x2_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \ float64x2_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \ float64x1_t __reint_249 = __s2_249; \ uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \ __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \ __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \ __ret_249; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \ float64x1_t __ret_250; \ float64x1_t __s0_250 = __p0_250; \ float64x1_t __s1_250 = __p1_250; \ float64x2_t __s2_250 = __p2_250; \ float64x2_t __reint_250 = __s2_250; \ uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \ __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \ __ret_250; \ }) #else #define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \ float64x1_t __ret_251; \ float64x1_t __s0_251 = __p0_251; \ float64x1_t __s1_251 = __p1_251; \ float64x2_t __s2_251 = __p2_251; \ float64x2_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \ float64x2_t __reint_251 = __rev2_251; \ uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \ __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \ __ret_251; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \ float64x2_t __ret_252; \ float64x2_t __s0_252 = 
__p0_252; \ float64x2_t __s1_252 = __p1_252; \ float64x2_t __s2_252 = __p2_252; \ float64x2_t __reint_252 = __s2_252; \ uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \ __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \ __ret_252; \ }) #else #define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \ float64x2_t __ret_253; \ float64x2_t __s0_253 = __p0_253; \ float64x2_t __s1_253 = __p1_253; \ float64x2_t __s2_253 = __p2_253; \ float64x2_t __rev0_253; __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \ float64x2_t __rev1_253; __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \ float64x2_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \ float64x2_t __reint_253 = __rev2_253; \ uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \ __ret_253 = __noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \ __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \ __ret_253; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else __ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif __ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } #define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \ float64x1_t __ret_254; \ float64x1_t __s0_254 = __p0_254; \ float64x1_t __s1_254 = __p1_254; \ float64x1_t __s2_254 = __p2_254; \ float64x1_t __reint_254 = __s2_254; \ uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \ __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \ __ret_254; \ }) #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \ float64x2_t __ret_255; \ float64x2_t __s0_255 = __p0_255; \ float64x2_t __s1_255 = __p1_255; \ float64x1_t __s2_255 = __p2_255; \ float64x1_t __reint_255 = __s2_255; \ uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \ __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \ 
__ret_255; \ }) #else #define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \ float64x2_t __ret_256; \ float64x2_t __s0_256 = __p0_256; \ float64x2_t __s1_256 = __p1_256; \ float64x1_t __s2_256 = __p2_256; \ float64x2_t __rev0_256; __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \ float64x2_t __rev1_256; __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \ float64x1_t __reint_256 = __s2_256; \ uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \ __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \ __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \ __ret_256; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ float64x1_t __ret_257; \ float64x1_t __s0_257 = __p0_257; \ float64x1_t __s1_257 = __p1_257; \ float64x2_t __s2_257 = __p2_257; \ float64x2_t __reint_257 = __s2_257; \ uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \ __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \ __ret_257; \ }) #else #define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \ float64x1_t __ret_258; \ float64x1_t __s0_258 = __p0_258; \ float64x1_t __s1_258 = __p1_258; \ float64x2_t __s2_258 = __p2_258; \ float64x2_t __rev2_258; __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \ float64x2_t __reint_258 = __rev2_258; \ uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \ __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \ __ret_258; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \ float64x2_t __ret_259; \ float64x2_t __s0_259 = __p0_259; \ float64x2_t __s1_259 = __p1_259; \ float64x2_t __s2_259 = __p2_259; \ float64x2_t __reint_259 = __s2_259; \ uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \ __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \ __ret_259; \ }) #else #define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \ float64x2_t __ret_260; \ float64x2_t __s0_260 = __p0_260; \ float64x2_t __s1_260 = __p1_260; \ float64x2_t __s2_260 = __p2_260; \ float64x2_t __rev0_260; __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \ float64x2_t __rev1_260; __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \ float64x2_t __rev2_260; __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \ float64x2_t __reint_260 = __rev2_260; \ uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \ __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \ __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \ __ret_260; \ }) #endif #endif #if defined(__ARM_FEATURE_DOTPROD) #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, 
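/*
 * __ARM_FEATURE_DOTPROD: the vdot/vdotq intrinsics below accumulate 4-element
 * dot products of 8-bit lanes into 32-bit lanes, i.e. result lane i becomes
 * acc[i] + p1[4i]*p2[4i] + ... + p1[4i+3]*p2[4i+3].  The big-endian bodies
 * only reverse lane order around the underlying builtin.
 *
 * Illustrative sketch (not part of the generated header):
 *
 *   uint32_t dot64_u8(const uint8_t *a, const uint8_t *b) {
 *     uint32x4_t acc = vdupq_n_u32(0);
 *     for (int i = 0; i < 64; i += 16)
 *       acc = vdotq_u32(acc, vld1q_u8(a + i), vld1q_u8(b + i));
 *     return vaddvq_u32(acc);             // horizontal add (AArch64 only)
 *   }
 */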
uint8x16_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); return __ret; } #else __ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else __ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 
1, 0); __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \ uint32x4_t __ret_261; \ uint32x4_t __s0_261 = __p0_261; \ uint8x16_t __s1_261 = __p1_261; \ uint8x8_t __s2_261 = __p2_261; \ uint8x8_t __reint_261 = __s2_261; \ uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, __p3_261); \ __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \ __ret_261; \ }) #else #define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \ uint32x4_t __ret_262; \ uint32x4_t __s0_262 = __p0_262; \ uint8x16_t __s1_262 = __p1_262; \ uint8x8_t __s2_262 = __p2_262; \ uint32x4_t __rev0_262; __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \ uint8x16_t __rev1_262; __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev2_262; __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __reint_262 = __rev2_262; \ uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \ __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \ __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \ __ret_262; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \ int32x4_t __ret_263; \ int32x4_t __s0_263 = __p0_263; \ int8x16_t __s1_263 = __p1_263; \ int8x8_t __s2_263 = __p2_263; \ int8x8_t __reint_263 = __s2_263; \ int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \ __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \ __ret_263; \ }) #else #define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \ int32x4_t __ret_264; \ int32x4_t __s0_264 = __p0_264; \ int8x16_t __s1_264 = __p1_264; \ int8x8_t __s2_264 = __p2_264; \ int32x4_t __rev0_264; __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \ int8x16_t __rev1_264; __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev2_264; __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __reint_264 = __rev2_264; \ int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \ __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \ __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \ __ret_264; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \ uint32x2_t __ret_265; \ uint32x2_t __s0_265 = __p0_265; \ uint8x8_t __s1_265 = __p1_265; \ uint8x8_t __s2_265 = __p2_265; \ uint8x8_t __reint_265 = __s2_265; \ uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \ __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \ __ret_265; \ }) #else #define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \ uint32x2_t __ret_266; \ 
uint32x2_t __s0_266 = __p0_266; \ uint8x8_t __s1_266 = __p1_266; \ uint8x8_t __s2_266 = __p2_266; \ uint32x2_t __rev0_266; __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \ uint8x8_t __rev1_266; __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev2_266; __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __reint_266 = __rev2_266; \ uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \ __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \ __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \ __ret_266; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \ int32x2_t __ret_267; \ int32x2_t __s0_267 = __p0_267; \ int8x8_t __s1_267 = __p1_267; \ int8x8_t __s2_267 = __p2_267; \ int8x8_t __reint_267 = __s2_267; \ int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \ __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \ __ret_267; \ }) #else #define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \ int32x2_t __ret_268; \ int32x2_t __s0_268 = __p0_268; \ int8x8_t __s1_268 = __p1_268; \ int8x8_t __s2_268 = __p2_268; \ int32x2_t __rev0_268; __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \ int8x8_t __rev1_268; __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev2_268; __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __reint_268 = __rev2_268; \ int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \ __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \ __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \ __ret_268; \ }) #endif #endif #if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ #define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \ uint32x4_t __ret_269; \ uint32x4_t __s0_269 = __p0_269; \ uint8x16_t __s1_269 = __p1_269; \ uint8x16_t __s2_269 = __p2_269; \ uint8x16_t __reint_269 = __s2_269; \ uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \ __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \ __ret_269; \ }) #else #define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ uint32x4_t __ret_270; \ uint32x4_t __s0_270 = __p0_270; \ uint8x16_t __s1_270 = __p1_270; \ uint8x16_t __s2_270 = __p2_270; \ uint32x4_t __rev0_270; __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \ uint8x16_t __rev1_270; __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev2_270; __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __reint_270 = __rev2_270; \ uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \ __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \ __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \ __ret_270; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ int32x4_t __ret_271; \ int32x4_t __s0_271 = __p0_271; \ int8x16_t __s1_271 = 
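/*
 * The vdot*_lane and vdot*_laneq macros select one group of four 8-bit
 * elements from the last vector operand (by reinterpreting it as 32-bit
 * lanes and splatting lane __p3) and then defer to the plain vdot/vdotq
 * functions above.  The _laneq forms, guarded by __aarch64__, take the group
 * from a 128-bit vector.
 *
 * Illustrative sketch (not part of the generated header):
 *
 *   // every lane: acc[i] += a[4i..4i+3] dot b[4..7]
 *   uint32x4_t dot_by_group1(uint32x4_t acc, uint8x16_t a, uint8x8_t b) {
 *     return vdotq_lane_u32(acc, a, b, 1);
 *   }
 */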
__p1_271; \ int8x16_t __s2_271 = __p2_271; \ int8x16_t __reint_271 = __s2_271; \ int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \ __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \ __ret_271; \ }) #else #define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ int32x4_t __ret_272; \ int32x4_t __s0_272 = __p0_272; \ int8x16_t __s1_272 = __p1_272; \ int8x16_t __s2_272 = __p2_272; \ int32x4_t __rev0_272; __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \ int8x16_t __rev1_272; __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev2_272; __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __reint_272 = __rev2_272; \ int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \ __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \ __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \ __ret_272; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ uint32x2_t __ret_273; \ uint32x2_t __s0_273 = __p0_273; \ uint8x8_t __s1_273 = __p1_273; \ uint8x16_t __s2_273 = __p2_273; \ uint8x16_t __reint_273 = __s2_273; \ uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \ __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \ __ret_273; \ }) #else #define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ uint32x2_t __ret_274; \ uint32x2_t __s0_274 = __p0_274; \ uint8x8_t __s1_274 = __p1_274; \ uint8x16_t __s2_274 = __p2_274; \ uint32x2_t __rev0_274; __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \ uint8x8_t __rev1_274; __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev2_274; __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __reint_274 = __rev2_274; \ uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \ __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \ __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \ __ret_274; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ int32x2_t __ret_275; \ int32x2_t __s0_275 = __p0_275; \ int8x8_t __s1_275 = __p1_275; \ int8x16_t __s2_275 = __p2_275; \ int8x16_t __reint_275 = __s2_275; \ int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \ __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \ __ret_275; \ }) #else #define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ int32x2_t __ret_276; \ int32x2_t __s0_276 = __p0_276; \ int8x8_t __s1_276 = __p1_276; \ int8x16_t __s2_276 = __p2_276; \ int32x2_t __rev0_276; __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \ int8x8_t __rev1_276; __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev2_276; __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __reint_276 = __rev2_276; \ int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \ 
__ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \ __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \ __ret_276; \ }) #endif #endif #if defined(__ARM_FEATURE_FMA) #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2}); return __ret; } #else __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = 
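/*
 * __ARM_FEATURE_FMA: vfma/vfmaq return p0 + p1*p2 with a single rounding
 * (fused multiply-add), and the _n forms splat a scalar multiplier.  The
 * vfms/vfmsq definitions in progress here are expressed as a fused
 * multiply-add with the multiplicand negated, i.e. p0 - p1*p2.
 *
 * Illustrative sketch (not part of the generated header):
 *
 *   float32x4_t axpy4(float32x4_t y, float32x4_t x, float32_t a) {
 *     return vfmaq_n_f32(y, x, a);        // y + a*x, one rounding per lane
 *   }
 */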
vfmaq_f32(__p0, -__p1, __p2); return __ret; } #else __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = vfma_f32(__p0, -__p1, __p2); return __ret; } #else __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #endif #if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return 
__ret; } #else __ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x2_t) 
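/*
 * __ARM_FEATURE_FP16_FML (AArch64): the vfmlal and vfmlsl intrinsics widen
 * float16 products to float32 and accumulate.  The _low forms use the low
 * half of each float16 vector, the _high forms the high half, and the vfmlsl
 * variants subtract the widened products instead of adding them.
 *
 * Illustrative sketch (not part of the generated header):
 *
 *   // acc += widen(low half of a) * widen(low half of b), in float32
 *   float32x4_t fmlal_lo(float32x4_t acc, float16x8_t a, float16x8_t b) {
 *     return vfmlalq_low_f16(acc, a, b);
 *   }
 */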
__builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else __ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else __ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #endif #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 
1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vabsq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vabsq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vabs_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vabs_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else __ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else __ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
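/*
 * __ARM_FEATURE_FP16_VECTOR_ARITHMETIC: element-wise half-precision helpers.
 * vabd is the absolute difference, vabs the absolute value, vadd plain
 * addition, and vbsl a bitwise select that takes bits from the second operand
 * where the mask is set and from the third where it is clear.  The comparison
 * family that follows (vcage/vcagt/vcale/vcalt, vceq, vceqz, ...) yields
 * all-ones or all-zero uint16 lanes.  As elsewhere in this header, the
 * big-endian bodies only reverse lane order around the builtin call.
 *
 * Illustrative sketch (not part of the generated header):
 *
 *   // per lane: |a - b| <= tol ? a : b
 *   float16x8_t pick_close(float16x8_t a, float16x8_t b, float16x8_t tol) {
 *     uint16x8_t close = vcleq_f16(vabdq_f16(a, b), tol);
 *     return vbslq_f16(close, a, b);
 *   }
 */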
3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcale_f16(float16x4_t __p0, 
float16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 == __p1); return __ret; } #else __ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 == __p1); return __ret; } #else __ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vceqzq_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vceqzq_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vceqz_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); 
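/* Illustrative note (a sketch, not part of the generated header): each
 * intrinsic in this section is emitted twice, selected by __LITTLE_ENDIAN__.
 * On big-endian targets the __builtin_shufflevector calls reverse the vector
 * lanes around each __builtin_neon_* call so that user code sees the
 * architectural (ACLE) lane numbering on either byte order; callers never
 * perform this reversal themselves.  Assuming FP16 vector arithmetic is
 * available, for example:
 *
 *   float16x4_t sum = vadd_f16(a, b);  // lane i of sum is a[i] + b[i],
 *                                      // little- or big-endian alike
 */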
return __ret; } #else __ai uint16x4_t vceqz_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 >= __p1); return __ret; } #else __ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 >= __p1); return __ret; } #else __ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgezq_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcgezq_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcgez_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcgez_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 > __p1); return __ret; } #else __ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 > __p1); return __ret; } #else __ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgtzq_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) 
__builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcgtzq_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcgtz_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcgtz_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 <= __p1); return __ret; } #else __ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 <= __p1); return __ret; } #else __ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vclezq_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vclezq_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vclez_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vclez_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 < __p1); return __ret; } #else __ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { 
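/* Sketch (illustrative only, not from the generated header): the vc*_f16
 * comparisons above return an unsigned mask per lane, all ones where the
 * predicate holds and all zeros where it does not, which is exactly the
 * selector format vbslq_f16/vbsl_f16 expect.  A lane-wise "pick the smaller
 * value" could therefore be written as:
 *
 *   float16x8_t pick_smaller(float16x8_t a, float16x8_t b) {
 *     uint16x8_t lt = vcltq_f16(a, b);   // 0xFFFF lanes where a < b
 *     return vbslq_f16(lt, a, b);        // take a where a < b, else b
 *   }
 *
 * (vminq_f16 already provides this directly; the sketch only shows how the
 * comparison masks and the bit-select intrinsic compose.)
 */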
uint16x4_t __ret; __ret = (uint16x4_t)(__p0 < __p1); return __ret; } #else __ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcltzq_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcltzq_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcltz_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcltz_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49); return __ret; } #else __ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { float16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33); return __ret; } #else __ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) { float16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17); return __ret; } #else __ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) { float16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vcvt_f16_s16(int16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1); return __ret; } #else __ai float16x4_t vcvt_f16_s16(int16x4_t __p0) { float16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (float16x8_t) 
__builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else #define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else #define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else #define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else #define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else #define 
vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else #define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) { int16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vcvt_s16_f16(float16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vcvt_s16_f16(float16x4_t __p0) { int16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { int16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vcvta_s16_f16(float16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1); 
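/* Sketch (not part of the generated header): the float16 <-> integer
 * conversions in this block differ only in rounding mode: vcvt_* truncates
 * toward zero, vcvta_* rounds to nearest with ties away from zero, vcvtm_*
 * rounds toward minus infinity, vcvtn_* rounds to nearest with ties to even,
 * and vcvtp_* rounds toward plus infinity.  The vcvt*_n_* macros instead
 * convert to/from fixed-point values with __p1 fractional bits.  Assuming
 * FP16 support, a per-lane floor-to-integer could be written as:
 *
 *   int16x4_t floor_to_s16(float16x4_t v) {
 *     return vcvtm_s16_f16(v);   // round toward minus infinity, like floorf
 *   }
 */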
return __ret; } #else __ai int16x4_t vcvta_s16_f16(float16x4_t __p0) { int16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { int16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) { int16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { int16x8_t __ret; float16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) { int16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33); return __ret; } #else __ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { int16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1); return __ret; } #else __ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) { int16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { uint16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = 
(uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ __ret; \ }) #else #define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vext_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ __ret; \ }) #else #define vext_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else __ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else __ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; 
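/* Sketch (illustrative only, not from the generated header): vfmaq_f16 and
 * vfma_f16 compute __p0 + __p1 * __p2 per lane with a single rounding step,
 * and the vfmsq_f16/vfms_f16 wrappers below simply negate the second operand
 * to obtain __p0 - __p1 * __p2.  Assuming FP16 support, a fused
 * multiply-accumulate step could be written as:
 *
 *   float16x8_t acc_step(float16x8_t acc, float16x8_t x, float16x8_t y) {
 *     return vfmaq_f16(acc, x, y);   // acc + x*y, rounded once per lane
 *   }
 */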
__ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = vfmaq_f16(__p0, -__p1, __p2); return __ret; } #else __ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = vfma_f16(__p0, -__p1, __p2); return __ret; } #else __ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) 
__builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \ float16x8_t __ret_277; \ float16x8_t __s0_277 = __p0_277; \ float16x4_t __s1_277 = __p1_277; \ __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \ __ret_277; \ }) #else #define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \ float16x8_t __ret_278; \ float16x8_t __s0_278 = __p0_278; \ float16x4_t __s1_278 = __p1_278; \ float16x8_t __rev0_278; __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev1_278; __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \ __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \ __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_278; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \ float16x4_t __ret_279; \ float16x4_t __s0_279 = __p0_279; \ float16x4_t __s1_279 = __p1_279; \ __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \ __ret_279; \ }) #else #define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \ float16x4_t __ret_280; \ float16x4_t __s0_280 = __p0_280; \ float16x4_t __s1_280 = __p1_280; \ float16x4_t __rev0_280; __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \ float16x4_t __rev1_280; __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \ __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \ __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \ __ret_280; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_n_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16_t __s1 = __p1; \ __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ __ret; \ }) #else #define vmulq_n_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_n_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16_t __s1 = __p1; \ __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ __ret; \ }) #else #define vmul_n_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vnegq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = -__p0; return __ret; } #else __ai float16x8_t vnegq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vneg_f16(float16x4_t __p0) { float16x4_t __ret; __ret = -__p0; return __ret; } #else __ai float16x4_t vneg_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrecpeq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) 
__builtin_neon_vrecpeq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrecpeq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrecpe_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrecpe_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrev64q_f16(float16x8_t __p0) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else __ai float16x8_t vrev64q_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrev64_f16(float16x4_t __p0) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); return __ret; } #else __ai float16x4_t vrev64_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrsqrteq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrsqrteq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40); __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrsqrte_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrsqrte_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8x2_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { float16x4x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { float16x4x2_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8x2_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { float16x4x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { float16x4x2_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8x2_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { float16x4x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { float16x4x2_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #endif #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __p0 / __p1; return __ret; } #else __ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 / __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __p0 / __p1; return __ret; } #else __ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 / __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vduph_lane_f16(__p0, __p1) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ __ret; \ }) #else #define vduph_lane_f16(__p0, __p1) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ __ret; \ }) #else #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ __ret; \ }) #else #define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ __ret; \ }) #define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ __ret; \ }) #else #define 
vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ __ret; \ }) #else #define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16x4_t __s2 = __p2; \ __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x8_t __s2 = __p2; \ __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ __ret; \ }) #else #define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x8_t __s2 = __p2; \ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ __ret; \ }) #define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x8_t __s2 = __p2; \ __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16x8_t __s2 = __p2; \ __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ __ret; \ }) #else #define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = 
__p1; \ float16x8_t __s2 = __p2; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16x8_t __s2 = __p2; \ __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16x8_t __s2 = __p2; \ __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ __ret; \ }) #else #define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16x8_t __s2 = __p2; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16x8_t __s2 = __p2; \ __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16_t __s2 = __p2; \ __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ __ret; \ }) #else #define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16_t __s2 = __p2; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16_t __s2 = __p2; \ __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ __ret; \ }) #else #define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16_t __s2 = __p2; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) 
{__s2, __s2, __s2, __s2}); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ float16_t __ret_281; \ float16_t __s0_281 = __p0_281; \ float16_t __s1_281 = __p1_281; \ float16x4_t __s2_281 = __p2_281; \ __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \ __ret_281; \ }) #else #define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ float16_t __ret_282; \ float16_t __s0_282 = __p0_282; \ float16_t __s1_282 = __p1_282; \ float16x4_t __s2_282 = __p2_282; \ float16x4_t __rev2_282; __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \ __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \ __ret_282; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ float16x8_t __ret_283; \ float16x8_t __s0_283 = __p0_283; \ float16x8_t __s1_283 = __p1_283; \ float16x4_t __s2_283 = __p2_283; \ __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \ __ret_283; \ }) #else #define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ float16x8_t __ret_284; \ float16x8_t __s0_284 = __p0_284; \ float16x8_t __s1_284 = __p1_284; \ float16x4_t __s2_284 = __p2_284; \ float16x8_t __rev0_284; __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_284; __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_284; __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \ __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \ __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_284; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ float16x4_t __ret_285; \ float16x4_t __s0_285 = __p0_285; \ float16x4_t __s1_285 = __p1_285; \ float16x4_t __s2_285 = __p2_285; \ __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \ __ret_285; \ }) #else #define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ float16x4_t __ret_286; \ float16x4_t __s0_286 = __p0_286; \ float16x4_t __s1_286 = __p1_286; \ float16x4_t __s2_286 = __p2_286; \ float16x4_t __rev0_286; __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \ float16x4_t __rev1_286; __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \ float16x4_t __rev2_286; __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \ __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \ __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \ __ret_286; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ float16_t __ret_287; \ float16_t __s0_287 = __p0_287; \ float16_t __s1_287 = __p1_287; \ float16x8_t __s2_287 = __p2_287; \ __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \ __ret_287; \ }) #else #define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ float16_t __ret_288; \ float16_t __s0_288 = __p0_288; \ float16_t __s1_288 = __p1_288; \ float16x8_t __s2_288 = __p2_288; \ float16x8_t __rev2_288; __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_288 = 
__noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \ __ret_288; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ float16x8_t __ret_289; \ float16x8_t __s0_289 = __p0_289; \ float16x8_t __s1_289 = __p1_289; \ float16x8_t __s2_289 = __p2_289; \ __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \ __ret_289; \ }) #else #define vfmsq_laneq_f16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ float16x8_t __ret_290; \ float16x8_t __s0_290 = __p0_290; \ float16x8_t __s1_290 = __p1_290; \ float16x8_t __s2_290 = __p2_290; \ float16x8_t __rev0_290; __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_290; __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2_290; __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \ __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_290; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ float16x4_t __ret_291; \ float16x4_t __s0_291 = __p0_291; \ float16x4_t __s1_291 = __p1_291; \ float16x8_t __s2_291 = __p2_291; \ __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \ __ret_291; \ }) #else #define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ float16x4_t __ret_292; \ float16x4_t __s0_292 = __p0_292; \ float16x4_t __s1_292 = __p1_292; \ float16x8_t __s2_292 = __p2_292; \ float16x4_t __rev0_292; __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \ float16x4_t __rev1_292; __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \ float16x8_t __rev2_292; __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \ __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \ __ret_292; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16_t __s2 = __p2; \ __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ __ret; \ }) #else #define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16_t __s2 = __p2; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16_t __s2 = __p2; \ __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ __ret; \ }) #else #define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16_t __s2 = __p2; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ float16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmaxnmvq_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ __ret; \ }) #else #define vmaxnmvq_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmaxnmv_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ __ret; \ }) #else #define vmaxnmv_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmaxvq_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ __ret; \ }) #else #define vmaxvq_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmaxv_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ __ret; \ }) #else #define vmaxv_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vminnmvq_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ __ret; \ }) #else #define vminnmvq_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vminnmv_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ __ret; \ }) #else #define vminnmv_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vminvq_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ __ret; \ }) #else #define vminvq_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x8_t __s0 = __p0; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vminv_f16(__p0) __extension__ ({ \ 
float16_t __ret; \ float16x4_t __s0 = __p0; \ __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ __ret; \ }) #else #define vminv_f16(__p0) __extension__ ({ \ float16_t __ret; \ float16x4_t __s0 = __p0; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \ float16x8_t __ret_293; \ float16x8_t __s0_293 = __p0_293; \ float16x8_t __s1_293 = __p1_293; \ __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \ __ret_293; \ }) #else #define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \ float16x8_t __ret_294; \ float16x8_t __s0_294 = __p0_294; \ float16x8_t __s1_294 = __p1_294; \ float16x8_t __rev0_294; __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_294; __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \ __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_294; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \ float16x4_t __ret_295; \ float16x4_t __s0_295 = __p0_295; \ float16x8_t __s1_295 = __p1_295; \ __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \ __ret_295; \ }) #else #define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \ float16x4_t __ret_296; \ float16x4_t __s0_296 = __p0_296; \ float16x8_t __s1_296 = __p1_296; \ float16x4_t __rev0_296; __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \ float16x8_t __rev1_296; __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \ __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \ __ret_296; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float16x4_t 
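/* Usage sketch (illustrative only; the variable names below are placeholders,
 * not identifiers defined by this header).  vmulx_f16/vmulxq_f16 lower to
 * FMULX, which multiplies element-wise but returns +/-2.0 instead of NaN for
 * the 0 * infinity case, while vmaxvq_f16/vminvq_f16 (and the *nmv* variants,
 * which follow the IEEE maxNum/minNum NaN rules) reduce across all lanes:
 *
 *   float16x8_t a = vdupq_n_f16((float16_t)1.5f);
 *   float16x8_t b = vdupq_n_f16((float16_t)2.0f);
 *   float16x8_t p = vmulxq_f16(a, b);   // element-wise multiply-extended
 *   float16_t   m = vmaxvq_f16(p);      // horizontal maximum of the 8 lanes
 */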
__noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ __ret; \ }) #else #define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16x4_t __s1 = __p1; \ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \ float16x8_t __ret_297; \ float16x8_t __s0_297 = __p0_297; \ float16x4_t __s1_297 = __p1_297; \ __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \ __ret_297; \ }) #else #define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \ float16x8_t __ret_298; \ float16x8_t __s0_298 = __p0_298; \ float16x4_t __s1_298 = __p1_298; \ float16x8_t __rev0_298; __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev1_298; __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \ __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \ __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_298; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \ float16x4_t __ret_299; \ float16x4_t __s0_299 = __p0_299; \ float16x4_t __s1_299 = __p1_299; \ __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \ __ret_299; \ }) #else #define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \ float16x4_t __ret_300; \ float16x4_t __s0_300 = __p0_300; \ float16x4_t __s1_300 = __p1_300; \ float16x4_t __rev0_300; __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \ float16x4_t __rev1_300; __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \ __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \ __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \ __ret_300; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ __ret; \ }) #else #define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ float16x8_t __s1 = __p1; \ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \ float16x8_t __ret_301; \ float16x8_t __s0_301 = __p0_301; \ float16x8_t __s1_301 = __p1_301; \ __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \ __ret_301; \ }) #else #define vmulxq_laneq_f16(__p0_302, __p1_302, __p2_302) __extension__ ({ \ float16x8_t __ret_302; \ float16x8_t __s0_302 = __p0_302; \ float16x8_t __s1_302 = __p1_302; \ float16x8_t __rev0_302; __rev0_302 = 
__builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev1_302; __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_302 = __noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \ __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_302; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \ float16x4_t __ret_303; \ float16x4_t __s0_303 = __p0_303; \ float16x8_t __s1_303 = __p1_303; \ __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \ __ret_303; \ }) #else #define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \ float16x4_t __ret_304; \ float16x4_t __s0_304 = __p0_304; \ float16x8_t __s1_304 = __p1_304; \ float16x4_t __rev0_304; __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \ float16x8_t __rev1_304; __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \ __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \ __ret_304; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16_t __s1 = __p1; \ __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ __ret; \ }) #else #define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ float16x8_t __ret; \ float16x8_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulx_n_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16_t __s1 = __p1; \ __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ __ret; \ }) #else #define vmulx_n_f16(__p0, __p1) __extension__ ({ \ float16x4_t __ret; \ float16x4_t __s0 = __p0; \ float16_t __s1 = __p1; \ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 
0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else __ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else __ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ __ai float16x8_t vrndiq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vrndiq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vrndi_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vrndi_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vsqrtq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40); return __ret; } #else __ai float16x8_t vsqrtq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vsqrt_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8); return __ret; } #else __ai float16x4_t vsqrt_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else __ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else __ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else __ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; 
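/* Note (illustrative; placeholder vector names): vtrn1/vtrn2 (transpose),
 * vuzp1/vuzp2 (unzip) and vzip1/vzip2 (zip) are pure lane permutations,
 * expressed directly as __builtin_shufflevector index patterns.  On
 * big-endian targets the inputs are first reversed into architectural lane
 * order (the __rev* temporaries used in this function) and the result is
 * reversed back.  With x = {x0,x1,x2,x3} and y = {y0,y1,y2,y3}:
 *
 *   float16x4_t lo = vzip1_f16(x, y);   // {x0, y0, x1, y1}
 *   float16x4_t hi = vzip2_f16(x, y);   // {x2, y2, x3, y3}
 */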
__rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else __ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else __ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else __ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else __ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else __ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else __ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else __ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else __ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else __ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #endif #if defined(__ARM_FEATURE_MATMUL_INT8) #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else __ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = 
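/* Note (illustrative; placeholder variable names): in this
 * __ARM_FEATURE_MATMUL_INT8 block, vmmlaq_u32/vmmlaq_s32 lower to UMMLA/SMMLA,
 * which multiply a 2x8 matrix of 8-bit elements (first vector operand) by an
 * 8x2 matrix (second vector operand) and accumulate the 2x2 32-bit product
 * into the four lanes of the accumulator, and vusdotq_s32/vusdot_s32 lower to
 * USDOT, a widening dot product of unsigned 8-bit by signed 8-bit elements:
 *
 *   int32x4_t acc = vdupq_n_s32(0);
 *   acc = vmmlaq_s32(acc, a_2x8, b_8x2);       // acc += A(2x8) * B(8x2)
 *   acc = vusdotq_s32(acc, u8_vals, s8_vals);  // u8 x s8 dot product per lane
 */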
__builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else __ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ int32x4_t __ret_305; \ int32x4_t __s0_305 = __p0_305; \ uint8x16_t __s1_305 = __p1_305; \ int8x8_t __s2_305 = __p2_305; \ int8x8_t __reint_305 = __s2_305; \ __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \ __ret_305; \ }) #else #define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ int32x4_t __ret_306; \ int32x4_t __s0_306 = __p0_306; \ uint8x16_t __s1_306 = __p1_306; \ int8x8_t __s2_306 = __p2_306; \ int32x4_t __rev0_306; __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \ uint8x16_t __rev1_306; __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev2_306; __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __reint_306 = __rev2_306; \ __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \ __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \ __ret_306; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define 
vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ int32x2_t __ret_307; \ int32x2_t __s0_307 = __p0_307; \ uint8x8_t __s1_307 = __p1_307; \ int8x8_t __s2_307 = __p2_307; \ int8x8_t __reint_307 = __s2_307; \ __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \ __ret_307; \ }) #else #define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ int32x2_t __ret_308; \ int32x2_t __s0_308 = __p0_308; \ uint8x8_t __s1_308 = __p1_308; \ int8x8_t __s2_308 = __p2_308; \ int32x2_t __rev0_308; __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \ uint8x8_t __rev1_308; __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev2_308; __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __reint_308 = __rev2_308; \ __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \ __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \ __ret_308; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #endif #if defined(__ARM_FEATURE_QRDMX) #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = 
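/* Note (illustrative; placeholder variable names): this __ARM_FEATURE_QRDMX
 * block provides SQRDMLAH/SQRDMLSH, the saturating rounding doubling multiply
 * accumulate/subtract returning the high half: roughly
 * sat(acc +/- ((2*a*b + round) >> esize)) per lane, with esize 16 for s16 and
 * 32 for s32.  The _lane_ forms multiply by one broadcast lane of the third
 * vector argument:
 *
 *   int16x8_t r = vqrdmlahq_lane_s16(acc, a, coeffs, 0);  // acc + hi(2*a*c0)
 *   int16x8_t s = vqrdmlshq_lane_s16(acc, a, coeffs, 0);  // acc - hi(2*a*c0)
 */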
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #else __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ int32x4_t __ret_309; \ int32x4_t __s0_309 = __p0_309; \ int32x4_t __s1_309 = __p1_309; \ int32x2_t __s2_309 = __p2_309; \ __ret_309 = vqrdmlahq_s32(__s0_309, __s1_309, splatq_lane_s32(__s2_309, __p3_309)); \ __ret_309; \ }) #else #define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ int32x4_t __ret_310; \ int32x4_t __s0_310 = __p0_310; \ int32x4_t __s1_310 = __p1_310; \ int32x2_t __s2_310 = __p2_310; \ int32x4_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \ int32x4_t __rev1_310; __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \ int32x2_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \ __ret_310 = __noswap_vqrdmlahq_s32(__rev0_310, __rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310)); \ __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \ __ret_310; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ int16x8_t 
__ret_311; \ int16x8_t __s0_311 = __p0_311; \ int16x8_t __s1_311 = __p1_311; \ int16x4_t __s2_311 = __p2_311; \ __ret_311 = vqrdmlahq_s16(__s0_311, __s1_311, splatq_lane_s16(__s2_311, __p3_311)); \ __ret_311; \ }) #else #define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ int16x8_t __ret_312; \ int16x8_t __s0_312 = __p0_312; \ int16x8_t __s1_312 = __p1_312; \ int16x4_t __s2_312 = __p2_312; \ int16x8_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_312; __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \ __ret_312 = __noswap_vqrdmlahq_s16(__rev0_312, __rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312)); \ __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_312; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ int32x2_t __ret_313; \ int32x2_t __s0_313 = __p0_313; \ int32x2_t __s1_313 = __p1_313; \ int32x2_t __s2_313 = __p2_313; \ __ret_313 = vqrdmlah_s32(__s0_313, __s1_313, splat_lane_s32(__s2_313, __p3_313)); \ __ret_313; \ }) #else #define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ int32x2_t __ret_314; \ int32x2_t __s0_314 = __p0_314; \ int32x2_t __s1_314 = __p1_314; \ int32x2_t __s2_314 = __p2_314; \ int32x2_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \ int32x2_t __rev1_314; __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \ int32x2_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \ __ret_314 = __noswap_vqrdmlah_s32(__rev0_314, __rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314)); \ __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \ __ret_314; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ int16x4_t __ret_315; \ int16x4_t __s0_315 = __p0_315; \ int16x4_t __s1_315 = __p1_315; \ int16x4_t __s2_315 = __p2_315; \ __ret_315 = vqrdmlah_s16(__s0_315, __s1_315, splat_lane_s16(__s2_315, __p3_315)); \ __ret_315; \ }) #else #define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ int16x4_t __ret_316; \ int16x4_t __s0_316 = __p0_316; \ int16x4_t __s1_316 = __p1_316; \ int16x4_t __s2_316 = __p2_316; \ int16x4_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \ int16x4_t __rev1_316; __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \ int16x4_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \ __ret_316 = __noswap_vqrdmlah_s16(__rev0_316, __rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316)); \ __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \ __ret_316; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 
1, 0); __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #else __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ int32x4_t __ret_317; \ int32x4_t __s0_317 = __p0_317; \ int32x4_t __s1_317 = 
__p1_317; \ int32x2_t __s2_317 = __p2_317; \ __ret_317 = vqrdmlshq_s32(__s0_317, __s1_317, splatq_lane_s32(__s2_317, __p3_317)); \ __ret_317; \ }) #else #define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ int32x4_t __ret_318; \ int32x4_t __s0_318 = __p0_318; \ int32x4_t __s1_318 = __p1_318; \ int32x2_t __s2_318 = __p2_318; \ int32x4_t __rev0_318; __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \ int32x4_t __rev1_318; __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \ int32x2_t __rev2_318; __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \ __ret_318 = __noswap_vqrdmlshq_s32(__rev0_318, __rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318)); \ __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \ __ret_318; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ int16x8_t __ret_319; \ int16x8_t __s0_319 = __p0_319; \ int16x8_t __s1_319 = __p1_319; \ int16x4_t __s2_319 = __p2_319; \ __ret_319 = vqrdmlshq_s16(__s0_319, __s1_319, splatq_lane_s16(__s2_319, __p3_319)); \ __ret_319; \ }) #else #define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ int16x8_t __ret_320; \ int16x8_t __s0_320 = __p0_320; \ int16x8_t __s1_320 = __p1_320; \ int16x4_t __s2_320 = __p2_320; \ int16x8_t __rev0_320; __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_320; __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_320; __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \ __ret_320 = __noswap_vqrdmlshq_s16(__rev0_320, __rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320)); \ __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_320; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ int32x2_t __ret_321; \ int32x2_t __s0_321 = __p0_321; \ int32x2_t __s1_321 = __p1_321; \ int32x2_t __s2_321 = __p2_321; \ __ret_321 = vqrdmlsh_s32(__s0_321, __s1_321, splat_lane_s32(__s2_321, __p3_321)); \ __ret_321; \ }) #else #define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ int32x2_t __ret_322; \ int32x2_t __s0_322 = __p0_322; \ int32x2_t __s1_322 = __p1_322; \ int32x2_t __s2_322 = __p2_322; \ int32x2_t __rev0_322; __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \ int32x2_t __rev1_322; __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \ int32x2_t __rev2_322; __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \ __ret_322 = __noswap_vqrdmlsh_s32(__rev0_322, __rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322)); \ __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \ __ret_322; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ int16x4_t __ret_323; \ int16x4_t __s0_323 = __p0_323; \ int16x4_t __s1_323 = __p1_323; \ int16x4_t __s2_323 = __p2_323; \ __ret_323 = vqrdmlsh_s16(__s0_323, __s1_323, splat_lane_s16(__s2_323, __p3_323)); \ __ret_323; \ }) #else #define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ int16x4_t __ret_324; \ int16x4_t __s0_324 = __p0_324; \ int16x4_t __s1_324 = __p1_324; \ int16x4_t __s2_324 = __p2_324; \ int16x4_t __rev0_324; __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 
1, 0); \ int16x4_t __rev1_324; __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \ int16x4_t __rev2_324; __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \ __ret_324 = __noswap_vqrdmlsh_s16(__rev0_324, __rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324)); \ __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \ __ret_324; \ }) #endif #endif #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__) __ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2); return __ret; } __ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ #define vqrdmlahs_lane_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ int32_t __ret_325; \ int32_t __s0_325 = __p0_325; \ int32_t __s1_325 = __p1_325; \ int32x2_t __s2_325 = __p2_325; \ __ret_325 = vqrdmlahs_s32(__s0_325, __s1_325, vget_lane_s32(__s2_325, __p3_325)); \ __ret_325; \ }) #else #define vqrdmlahs_lane_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ int32_t __ret_326; \ int32_t __s0_326 = __p0_326; \ int32_t __s1_326 = __p1_326; \ int32x2_t __s2_326 = __p2_326; \ int32x2_t __rev2_326; __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 1, 0); \ __ret_326 = vqrdmlahs_s32(__s0_326, __s1_326, __noswap_vget_lane_s32(__rev2_326, __p3_326)); \ __ret_326; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlahh_lane_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ int16_t __ret_327; \ int16_t __s0_327 = __p0_327; \ int16_t __s1_327 = __p1_327; \ int16x4_t __s2_327 = __p2_327; \ __ret_327 = vqrdmlahh_s16(__s0_327, __s1_327, vget_lane_s16(__s2_327, __p3_327)); \ __ret_327; \ }) #else #define vqrdmlahh_lane_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ int16_t __ret_328; \ int16_t __s0_328 = __p0_328; \ int16_t __s1_328 = __p1_328; \ int16x4_t __s2_328 = __p2_328; \ int16x4_t __rev2_328; __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \ __ret_328 = vqrdmlahh_s16(__s0_328, __s1_328, __noswap_vget_lane_s16(__rev2_328, __p3_328)); \ __ret_328; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlahs_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ int32_t __ret_329; \ int32_t __s0_329 = __p0_329; \ int32_t __s1_329 = __p1_329; \ int32x4_t __s2_329 = __p2_329; \ __ret_329 = vqrdmlahs_s32(__s0_329, __s1_329, vgetq_lane_s32(__s2_329, __p3_329)); \ __ret_329; \ }) #else #define vqrdmlahs_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ int32_t __ret_330; \ int32_t __s0_330 = __p0_330; \ int32_t __s1_330 = __p1_330; \ int32x4_t __s2_330 = __p2_330; \ int32x4_t __rev2_330; __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \ __ret_330 = vqrdmlahs_s32(__s0_330, __s1_330, __noswap_vgetq_lane_s32(__rev2_330, __p3_330)); \ __ret_330; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlahh_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ int16_t __ret_331; \ int16_t __s0_331 = __p0_331; \ int16_t __s1_331 = __p1_331; \ int16x8_t __s2_331 = __p2_331; \ __ret_331 = vqrdmlahh_s16(__s0_331, __s1_331, vgetq_lane_s16(__s2_331, __p3_331)); \ __ret_331; \ }) #else #define vqrdmlahh_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ int16_t __ret_332; \ int16_t __s0_332 = __p0_332; \ int16_t __s1_332 = __p1_332; \ int16x8_t 
__s2_332 = __p2_332; \ int16x8_t __rev2_332; __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_332 = vqrdmlahh_s16(__s0_332, __s1_332, __noswap_vgetq_lane_s16(__rev2_332, __p3_332)); \ __ret_332; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlahq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ int32x4_t __ret_333; \ int32x4_t __s0_333 = __p0_333; \ int32x4_t __s1_333 = __p1_333; \ int32x4_t __s2_333 = __p2_333; \ __ret_333 = vqrdmlahq_s32(__s0_333, __s1_333, splatq_laneq_s32(__s2_333, __p3_333)); \ __ret_333; \ }) #else #define vqrdmlahq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ int32x4_t __ret_334; \ int32x4_t __s0_334 = __p0_334; \ int32x4_t __s1_334 = __p1_334; \ int32x4_t __s2_334 = __p2_334; \ int32x4_t __rev0_334; __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \ int32x4_t __rev1_334; __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \ int32x4_t __rev2_334; __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \ __ret_334 = __noswap_vqrdmlahq_s32(__rev0_334, __rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334)); \ __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \ __ret_334; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlahq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ int16x8_t __ret_335; \ int16x8_t __s0_335 = __p0_335; \ int16x8_t __s1_335 = __p1_335; \ int16x8_t __s2_335 = __p2_335; \ __ret_335 = vqrdmlahq_s16(__s0_335, __s1_335, splatq_laneq_s16(__s2_335, __p3_335)); \ __ret_335; \ }) #else #define vqrdmlahq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ int16x8_t __ret_336; \ int16x8_t __s0_336 = __p0_336; \ int16x8_t __s1_336 = __p1_336; \ int16x8_t __s2_336 = __p2_336; \ int16x8_t __rev0_336; __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_336; __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_336; __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_336 = __noswap_vqrdmlahq_s16(__rev0_336, __rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336)); \ __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_336; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlah_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ int32x2_t __ret_337; \ int32x2_t __s0_337 = __p0_337; \ int32x2_t __s1_337 = __p1_337; \ int32x4_t __s2_337 = __p2_337; \ __ret_337 = vqrdmlah_s32(__s0_337, __s1_337, splat_laneq_s32(__s2_337, __p3_337)); \ __ret_337; \ }) #else #define vqrdmlah_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ int32x2_t __ret_338; \ int32x2_t __s0_338 = __p0_338; \ int32x2_t __s1_338 = __p1_338; \ int32x4_t __s2_338 = __p2_338; \ int32x2_t __rev0_338; __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \ int32x2_t __rev1_338; __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \ int32x4_t __rev2_338; __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \ __ret_338 = __noswap_vqrdmlah_s32(__rev0_338, __rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338)); \ __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \ __ret_338; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlah_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ int16x4_t __ret_339; \ int16x4_t __s0_339 = 
__p0_339; \ int16x4_t __s1_339 = __p1_339; \ int16x8_t __s2_339 = __p2_339; \ __ret_339 = vqrdmlah_s16(__s0_339, __s1_339, splat_laneq_s16(__s2_339, __p3_339)); \ __ret_339; \ }) #else #define vqrdmlah_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ int16x4_t __ret_340; \ int16x4_t __s0_340 = __p0_340; \ int16x4_t __s1_340 = __p1_340; \ int16x8_t __s2_340 = __p2_340; \ int16x4_t __rev0_340; __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \ int16x4_t __rev1_340; __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \ int16x8_t __rev2_340; __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_340 = __noswap_vqrdmlah_s16(__rev0_340, __rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340)); \ __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \ __ret_340; \ }) #endif __ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2); return __ret; } __ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ #define vqrdmlshs_lane_s32(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ int32_t __ret_341; \ int32_t __s0_341 = __p0_341; \ int32_t __s1_341 = __p1_341; \ int32x2_t __s2_341 = __p2_341; \ __ret_341 = vqrdmlshs_s32(__s0_341, __s1_341, vget_lane_s32(__s2_341, __p3_341)); \ __ret_341; \ }) #else #define vqrdmlshs_lane_s32(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ int32_t __ret_342; \ int32_t __s0_342 = __p0_342; \ int32_t __s1_342 = __p1_342; \ int32x2_t __s2_342 = __p2_342; \ int32x2_t __rev2_342; __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 1, 0); \ __ret_342 = vqrdmlshs_s32(__s0_342, __s1_342, __noswap_vget_lane_s32(__rev2_342, __p3_342)); \ __ret_342; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlshh_lane_s16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \ int16_t __ret_343; \ int16_t __s0_343 = __p0_343; \ int16_t __s1_343 = __p1_343; \ int16x4_t __s2_343 = __p2_343; \ __ret_343 = vqrdmlshh_s16(__s0_343, __s1_343, vget_lane_s16(__s2_343, __p3_343)); \ __ret_343; \ }) #else #define vqrdmlshh_lane_s16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \ int16_t __ret_344; \ int16_t __s0_344 = __p0_344; \ int16_t __s1_344 = __p1_344; \ int16x4_t __s2_344 = __p2_344; \ int16x4_t __rev2_344; __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \ __ret_344 = vqrdmlshh_s16(__s0_344, __s1_344, __noswap_vget_lane_s16(__rev2_344, __p3_344)); \ __ret_344; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlshs_laneq_s32(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \ int32_t __ret_345; \ int32_t __s0_345 = __p0_345; \ int32_t __s1_345 = __p1_345; \ int32x4_t __s2_345 = __p2_345; \ __ret_345 = vqrdmlshs_s32(__s0_345, __s1_345, vgetq_lane_s32(__s2_345, __p3_345)); \ __ret_345; \ }) #else #define vqrdmlshs_laneq_s32(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \ int32_t __ret_346; \ int32_t __s0_346 = __p0_346; \ int32_t __s1_346 = __p1_346; \ int32x4_t __s2_346 = __p2_346; \ int32x4_t __rev2_346; __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 3, 2, 1, 0); \ __ret_346 = vqrdmlshs_s32(__s0_346, __s1_346, __noswap_vgetq_lane_s32(__rev2_346, __p3_346)); \ __ret_346; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlshh_laneq_s16(__p0_347, __p1_347, __p2_347, 
__p3_347) __extension__ ({ \ int16_t __ret_347; \ int16_t __s0_347 = __p0_347; \ int16_t __s1_347 = __p1_347; \ int16x8_t __s2_347 = __p2_347; \ __ret_347 = vqrdmlshh_s16(__s0_347, __s1_347, vgetq_lane_s16(__s2_347, __p3_347)); \ __ret_347; \ }) #else #define vqrdmlshh_laneq_s16(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \ int16_t __ret_348; \ int16_t __s0_348 = __p0_348; \ int16_t __s1_348 = __p1_348; \ int16x8_t __s2_348 = __p2_348; \ int16x8_t __rev2_348; __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_348 = vqrdmlshh_s16(__s0_348, __s1_348, __noswap_vgetq_lane_s16(__rev2_348, __p3_348)); \ __ret_348; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlshq_laneq_s32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \ int32x4_t __ret_349; \ int32x4_t __s0_349 = __p0_349; \ int32x4_t __s1_349 = __p1_349; \ int32x4_t __s2_349 = __p2_349; \ __ret_349 = vqrdmlshq_s32(__s0_349, __s1_349, splatq_laneq_s32(__s2_349, __p3_349)); \ __ret_349; \ }) #else #define vqrdmlshq_laneq_s32(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \ int32x4_t __ret_350; \ int32x4_t __s0_350 = __p0_350; \ int32x4_t __s1_350 = __p1_350; \ int32x4_t __s2_350 = __p2_350; \ int32x4_t __rev0_350; __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 3, 2, 1, 0); \ int32x4_t __rev1_350; __rev1_350 = __builtin_shufflevector(__s1_350, __s1_350, 3, 2, 1, 0); \ int32x4_t __rev2_350; __rev2_350 = __builtin_shufflevector(__s2_350, __s2_350, 3, 2, 1, 0); \ __ret_350 = __noswap_vqrdmlshq_s32(__rev0_350, __rev1_350, __noswap_splatq_laneq_s32(__rev2_350, __p3_350)); \ __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \ __ret_350; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlshq_laneq_s16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \ int16x8_t __ret_351; \ int16x8_t __s0_351 = __p0_351; \ int16x8_t __s1_351 = __p1_351; \ int16x8_t __s2_351 = __p2_351; \ __ret_351 = vqrdmlshq_s16(__s0_351, __s1_351, splatq_laneq_s16(__s2_351, __p3_351)); \ __ret_351; \ }) #else #define vqrdmlshq_laneq_s16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \ int16x8_t __ret_352; \ int16x8_t __s0_352 = __p0_352; \ int16x8_t __s1_352 = __p1_352; \ int16x8_t __s2_352 = __p2_352; \ int16x8_t __rev0_352; __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_352; __rev1_352 = __builtin_shufflevector(__s1_352, __s1_352, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_352; __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_352 = __noswap_vqrdmlshq_s16(__rev0_352, __rev1_352, __noswap_splatq_laneq_s16(__rev2_352, __p3_352)); \ __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_352; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlsh_laneq_s32(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \ int32x2_t __ret_353; \ int32x2_t __s0_353 = __p0_353; \ int32x2_t __s1_353 = __p1_353; \ int32x4_t __s2_353 = __p2_353; \ __ret_353 = vqrdmlsh_s32(__s0_353, __s1_353, splat_laneq_s32(__s2_353, __p3_353)); \ __ret_353; \ }) #else #define vqrdmlsh_laneq_s32(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \ int32x2_t __ret_354; \ int32x2_t __s0_354 = __p0_354; \ int32x2_t __s1_354 = __p1_354; \ int32x4_t __s2_354 = __p2_354; \ int32x2_t __rev0_354; __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \ int32x2_t __rev1_354; __rev1_354 = __builtin_shufflevector(__s1_354, __s1_354, 1, 0); \ int32x4_t 
__rev2_354; __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 3, 2, 1, 0); \ __ret_354 = __noswap_vqrdmlsh_s32(__rev0_354, __rev1_354, __noswap_splat_laneq_s32(__rev2_354, __p3_354)); \ __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \ __ret_354; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmlsh_laneq_s16(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \ int16x4_t __ret_355; \ int16x4_t __s0_355 = __p0_355; \ int16x4_t __s1_355 = __p1_355; \ int16x8_t __s2_355 = __p2_355; \ __ret_355 = vqrdmlsh_s16(__s0_355, __s1_355, splat_laneq_s16(__s2_355, __p3_355)); \ __ret_355; \ }) #else #define vqrdmlsh_laneq_s16(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \ int16x4_t __ret_356; \ int16x4_t __s0_356 = __p0_356; \ int16x4_t __s1_356 = __p1_356; \ int16x8_t __s2_356 = __p2_356; \ int16x4_t __rev0_356; __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \ int16x4_t __rev1_356; __rev1_356 = __builtin_shufflevector(__s1_356, __s1_356, 3, 2, 1, 0); \ int16x8_t __rev2_356; __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_356 = __noswap_vqrdmlsh_s16(__rev0_356, __rev1_356, __noswap_splat_laneq_s16(__rev2_356, __p3_356)); \ __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \ __ret_356; \ }) #endif #endif #if defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } __ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { float64_t __ret; __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); return __ret; } __ai float32_t vabds_f32(float32_t __p0, float32_t __p1) { float32_t __ret; __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vabsq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vabsq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vabsq_s64(int64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vabsq_s64(int64x2_t __p0) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vabs_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); return __ret; } __ai int64x1_t vabs_s64(int64x1_t __p0) { int64x1_t 
__ret; __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); return __ret; } __ai int64_t vabsd_s64(int64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __p0 + __p1; return __ret; } #else __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = __p0 + __p1; return __ret; } __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); return __ret; } __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); return __ret; } __ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) { poly128_t __ret; __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); return __ret; } #else __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); return __ret; } #else __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); return __ret; } #else __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; __ret = vcombine_s16(__p0, 
vaddhn_s32(__p1, __p2)); return __ret; } #else __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2)); return __ret; } #else __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); return __ret; } #else __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16_t vaddlvq_u8(uint8x16_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0); return __ret; } #else __ai uint16_t vaddlvq_u8(uint8x16_t __p0) { uint16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64_t vaddlvq_u32(uint32x4_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0); return __ret; } #else __ai uint64_t vaddlvq_u32(uint32x4_t __p0) { uint64_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32_t vaddlvq_u16(uint16x8_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0); return __ret; } #else __ai uint32_t vaddlvq_u16(uint16x8_t __p0) { uint32_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16_t vaddlvq_s8(int8x16_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0); return __ret; } #else __ai int16_t vaddlvq_s8(int8x16_t __p0) { int16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ __ai int64_t vaddlvq_s32(int32x4_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0); return __ret; } #else __ai int64_t vaddlvq_s32(int32x4_t __p0) { int64_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32_t vaddlvq_s16(int16x8_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0); return __ret; } #else __ai int32_t vaddlvq_s16(int16x8_t __p0) { int32_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16_t vaddlv_u8(uint8x8_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0); return __ret; } #else __ai uint16_t vaddlv_u8(uint8x8_t __p0) { uint16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64_t vaddlv_u32(uint32x2_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0); return __ret; } #else __ai uint64_t vaddlv_u32(uint32x2_t __p0) { uint64_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32_t vaddlv_u16(uint16x4_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0); return __ret; } #else __ai uint32_t vaddlv_u16(uint16x4_t __p0) { uint32_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16_t vaddlv_s8(int8x8_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0); return __ret; } #else __ai int16_t vaddlv_s8(int8x8_t __p0) { int16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64_t vaddlv_s32(int32x2_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0); return __ret; } #else __ai int64_t vaddlv_s32(int32x2_t __p0) { int64_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32_t vaddlv_s16(int16x4_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0); return __ret; } #else __ai int32_t vaddlv_s16(int16x4_t __p0) { int32_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8_t vaddvq_u8(uint8x16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0); return __ret; } #else __ai uint8_t vaddvq_u8(uint8x16_t __p0) { uint8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32_t vaddvq_u32(uint32x4_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0); return __ret; } #else __ai uint32_t vaddvq_u32(uint32x4_t 
__p0) { uint32_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64_t vaddvq_u64(uint64x2_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0); return __ret; } #else __ai uint64_t vaddvq_u64(uint64x2_t __p0) { uint64_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16_t vaddvq_u16(uint16x8_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0); return __ret; } #else __ai uint16_t vaddvq_u16(uint16x8_t __p0) { uint16_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8_t vaddvq_s8(int8x16_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0); return __ret; } #else __ai int8_t vaddvq_s8(int8x16_t __p0) { int8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64_t vaddvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0); return __ret; } #else __ai float64_t vaddvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vaddvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0); return __ret; } #else __ai float32_t vaddvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32_t vaddvq_s32(int32x4_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0); return __ret; } #else __ai int32_t vaddvq_s32(int32x4_t __p0) { int32_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64_t vaddvq_s64(int64x2_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0); return __ret; } #else __ai int64_t vaddvq_s64(int64x2_t __p0) { int64_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16_t vaddvq_s16(int16x8_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0); return __ret; } #else __ai int16_t vaddvq_s16(int16x8_t __p0) { int16_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8_t vaddv_u8(uint8x8_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0); return __ret; } #else __ai uint8_t vaddv_u8(uint8x8_t __p0) { uint8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0); return __ret; } #endif 
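/* The vaddv_* forms below add all lanes of a 64-bit vector and return the
 * scalar sum; as elsewhere in this header, the big-endian variants reverse
 * the lane order before invoking the builtin. */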
#ifdef __LITTLE_ENDIAN__ __ai uint32_t vaddv_u32(uint32x2_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0); return __ret; } #else __ai uint32_t vaddv_u32(uint32x2_t __p0) { uint32_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16_t vaddv_u16(uint16x4_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0); return __ret; } #else __ai uint16_t vaddv_u16(uint16x4_t __p0) { uint16_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8_t vaddv_s8(int8x8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vaddv_s8(__p0); return __ret; } #else __ai int8_t vaddv_s8(int8x8_t __p0) { int8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vaddv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vaddv_f32(__p0); return __ret; } #else __ai float32_t vaddv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32_t vaddv_s32(int32x2_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vaddv_s32(__p0); return __ret; } #else __ai int32_t vaddv_s32(int32x2_t __p0) { int32_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16_t vaddv_s16(int16x4_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vaddv_s16(__p0); return __ret; } #else __ai int16_t vaddv_s16(int16x4_t __p0) { int16_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0); return __ret; } #endif __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { poly64x1_t __ret; __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { poly64x2_t __ret; __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); return __ret; } #else __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { poly64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); return __ret; } __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); return __ret; } __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; 
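/* absolute compare: sets __ret to an all-ones 64-bit mask when |__p0| <= |__p1|, otherwise 0 */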
__ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); return __ret; } __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); return __ret; } __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); return __ret; } __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 == __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 == __p1); return __ret; } #else __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { uint64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 == __p1); return __ret; } #else __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 == __p1); return __ret; } #else __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 == __p1); return __ret; } #else __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 == __p1); 
return __ret; } __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 == __p1); return __ret; } __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 == __p1); return __ret; } __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); return __ret; } __ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1); return __ret; } __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); return __ret; } __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vceqz_p8(poly8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vceqz_p8(poly8x8_t __p0) { uint8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif __ai uint64x1_t vceqz_p64(poly64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { uint8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { uint64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t 
vceqzq_u64(uint64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vceqzq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vceqzq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vceqzq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vceqzq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vceqzq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vceqzq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vceqzq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vceqzq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vceqzq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vceqzq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vceqzq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vceqzq_s16(int16x8_t __p0) { uint16x8_t 
__ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vceqz_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vceqz_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vceqz_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vceqz_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vceqz_u64(uint64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vceqz_u16(uint16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vceqz_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vceqz_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vceqz_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif __ai uint64x1_t vceqz_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vceqz_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vceqz_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vceqz_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vceqz_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vceqz_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vceqz_s16(int16x4_t __p0) { uint16x4_t __ret; __ret 
= (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vceqz_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai uint64_t vceqzd_u64(uint64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); return __ret; } __ai uint64_t vceqzd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0); return __ret; } __ai uint64_t vceqzd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); return __ret; } __ai uint32_t vceqzs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 >= __p1); return __ret; } #else __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 >= __p1); return __ret; } #else __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 >= __p1); return __ret; } #else __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 >= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 >= __p1); return __ret; } __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 >= __p1); return __ret; } __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 >= __p1); return __ret; } __ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1); return __ret; } __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); return __ret; } __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); return __ret; } __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcgezq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t 
vcgezq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgezq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcgezq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgezq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcgezq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgezq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcgezq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgezq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcgezq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgezq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcgezq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcgez_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vcgez_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif __ai uint64x1_t vcgez_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcgez_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcgez_f32(float32x2_t __p0) { 
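/* big-endian path: reverse the two lanes, apply the >= 0.0 comparison builtin, then restore the original lane order */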
uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcgez_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcgez_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcgez_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcgez_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcgez_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai uint64_t vcgezd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0); return __ret; } __ai uint64_t vcgezd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); return __ret; } __ai uint32_t vcgezs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 > __p1); return __ret; } #else __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 > __p1); return __ret; } #else __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 > __p1); return __ret; } #else __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 > __p1); return __ret; } __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 > __p1); return __ret; } __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 > __p1); return __ret; } 
__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1); return __ret; } __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); return __ret; } __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); return __ret; } __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcgtz_s8(int8x8_t __p0) { uint8x8_t 
__ret; __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vcgtz_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif __ai uint64x1_t vcgtz_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcgtz_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcgtz_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcgtz_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcgtz_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcgtz_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcgtz_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcgtz_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai uint64_t vcgtzd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0); return __ret; } __ai uint64_t vcgtzd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); return __ret; } __ai uint32_t vcgtzs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 <= __p1); return __ret; } #else __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 <= __p1); return __ret; } #else __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = 
(uint64x2_t)(__p0 <= __p1); return __ret; } #else __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 <= __p1); return __ret; } __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 <= __p1); return __ret; } __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 <= __p1); return __ret; } __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); return __ret; } __ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1); return __ret; } __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); return __ret; } __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vclezq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vclezq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vclezq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vclezq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vclezq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vclezq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vclezq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vclezq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vclezq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vclezq_s64(int64x2_t __p0) { uint64x2_t __ret; 
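/* Descriptive comment (added for clarity, not generator output): vclezq_s64
 * tests each 64-bit lane of __p0 for <= 0; lanes that satisfy the test are
 * set to all-ones in the returned uint64x2_t mask, the others to zero. */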
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vclezq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vclezq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vclez_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vclez_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif __ai uint64x1_t vclez_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vclez_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vclez_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vclez_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vclez_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vclez_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vclez_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vclez_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai uint64_t vclezd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0); return __ret; } __ai uint64_t vclezd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); return __ret; } __ai uint32_t vclezs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 < __p1); return __ret; } #else __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 < __p1); return __ret; } #else __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 < __p1); return __ret; } #else __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 < __p1); return __ret; } __ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 < __p1); return __ret; } __ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 < __p1); return __ret; } __ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); return __ret; } __ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1); return __ret; } __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); return __ret; } __ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vcltzq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vcltzq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcltzq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcltzq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcltzq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcltzq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vcltzq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); return __ret; } #else __ai uint32x4_t vcltzq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcltzq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcltzq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vcltzq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); return __ret; } #else __ai uint16x8_t vcltzq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vcltz_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vcltz_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif __ai uint64x1_t vcltz_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcltz_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcltz_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vcltz_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); return __ret; } #else __ai uint32x2_t vcltz_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcltz_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vcltz_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); return __ret; } #else __ai uint16x4_t vcltz_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai uint64_t vcltzd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0); return __ret; } __ai uint64_t vcltzd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); return __ret; } __ai uint32_t vcltzs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_p8(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \ poly8x16_t __ret_357; \ poly8x16_t __s0_357 = __p0_357; \ poly8x8_t __s2_357 = __p2_357; \ __ret_357 = vsetq_lane_p8(vget_lane_p8(__s2_357, __p3_357), __s0_357, __p1_357); \ __ret_357; \ }) #else #define vcopyq_lane_p8(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \ poly8x16_t __ret_358; \ poly8x16_t __s0_358 = __p0_358; \ poly8x8_t __s2_358 = __p2_358; \ poly8x16_t __rev0_358; __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x8_t __rev2_358; __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_358 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_358, __p3_358), __rev0_358, __p1_358); \ __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_358; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_p16(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \ poly16x8_t __ret_359; \ poly16x8_t __s0_359 = __p0_359; \ poly16x4_t __s2_359 = __p2_359; \ __ret_359 = vsetq_lane_p16(vget_lane_p16(__s2_359, __p3_359), __s0_359, __p1_359); \ __ret_359; \ }) #else #define vcopyq_lane_p16(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \ poly16x8_t __ret_360; \ poly16x8_t __s0_360 = __p0_360; \ poly16x4_t __s2_360 = __p2_360; \ poly16x8_t __rev0_360; __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \ poly16x4_t __rev2_360; __rev2_360 = __builtin_shufflevector(__s2_360, __s2_360, 3, 2, 1, 0); \ __ret_360 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_360, __p3_360), __rev0_360, __p1_360); \ __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_360; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_u8(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \ uint8x16_t __ret_361; \ uint8x16_t __s0_361 = __p0_361; \ uint8x8_t __s2_361 = __p2_361; \ __ret_361 = vsetq_lane_u8(vget_lane_u8(__s2_361, __p3_361), __s0_361, __p1_361); \ __ret_361; \ }) #else #define vcopyq_lane_u8(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \ uint8x16_t __ret_362; \ uint8x16_t __s0_362 = __p0_362; \ 
uint8x8_t __s2_362 = __p2_362; \ uint8x16_t __rev0_362; __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev2_362; __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_362 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_362, __p3_362), __rev0_362, __p1_362); \ __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_362; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_u32(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \ uint32x4_t __ret_363; \ uint32x4_t __s0_363 = __p0_363; \ uint32x2_t __s2_363 = __p2_363; \ __ret_363 = vsetq_lane_u32(vget_lane_u32(__s2_363, __p3_363), __s0_363, __p1_363); \ __ret_363; \ }) #else #define vcopyq_lane_u32(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \ uint32x4_t __ret_364; \ uint32x4_t __s0_364 = __p0_364; \ uint32x2_t __s2_364 = __p2_364; \ uint32x4_t __rev0_364; __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \ uint32x2_t __rev2_364; __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 1, 0); \ __ret_364 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_364, __p3_364), __rev0_364, __p1_364); \ __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \ __ret_364; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_u64(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \ uint64x2_t __ret_365; \ uint64x2_t __s0_365 = __p0_365; \ uint64x1_t __s2_365 = __p2_365; \ __ret_365 = vsetq_lane_u64(vget_lane_u64(__s2_365, __p3_365), __s0_365, __p1_365); \ __ret_365; \ }) #else #define vcopyq_lane_u64(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \ uint64x2_t __ret_366; \ uint64x2_t __s0_366 = __p0_366; \ uint64x1_t __s2_366 = __p2_366; \ uint64x2_t __rev0_366; __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \ __ret_366 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_366, __p3_366), __rev0_366, __p1_366); \ __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ __ret_366; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_u16(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \ uint16x8_t __ret_367; \ uint16x8_t __s0_367 = __p0_367; \ uint16x4_t __s2_367 = __p2_367; \ __ret_367 = vsetq_lane_u16(vget_lane_u16(__s2_367, __p3_367), __s0_367, __p1_367); \ __ret_367; \ }) #else #define vcopyq_lane_u16(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \ uint16x8_t __ret_368; \ uint16x8_t __s0_368 = __p0_368; \ uint16x4_t __s2_368 = __p2_368; \ uint16x8_t __rev0_368; __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x4_t __rev2_368; __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 3, 2, 1, 0); \ __ret_368 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_368, __p3_368), __rev0_368, __p1_368); \ __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_368; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_s8(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \ int8x16_t __ret_369; \ int8x16_t __s0_369 = __p0_369; \ int8x8_t __s2_369 = __p2_369; \ __ret_369 = vsetq_lane_s8(vget_lane_s8(__s2_369, __p3_369), __s0_369, __p1_369); \ __ret_369; \ }) #else #define vcopyq_lane_s8(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \ int8x16_t __ret_370; \ int8x16_t __s0_370 = __p0_370; \ int8x8_t __s2_370 = __p2_370; \ int8x16_t __rev0_370; __rev0_370 = 
__builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev2_370; __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_370 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_370, __p3_370), __rev0_370, __p1_370); \ __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_370; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_f32(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \ float32x4_t __ret_371; \ float32x4_t __s0_371 = __p0_371; \ float32x2_t __s2_371 = __p2_371; \ __ret_371 = vsetq_lane_f32(vget_lane_f32(__s2_371, __p3_371), __s0_371, __p1_371); \ __ret_371; \ }) #else #define vcopyq_lane_f32(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \ float32x4_t __ret_372; \ float32x4_t __s0_372 = __p0_372; \ float32x2_t __s2_372 = __p2_372; \ float32x4_t __rev0_372; __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 3, 2, 1, 0); \ float32x2_t __rev2_372; __rev2_372 = __builtin_shufflevector(__s2_372, __s2_372, 1, 0); \ __ret_372 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_372, __p3_372), __rev0_372, __p1_372); \ __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 3, 2, 1, 0); \ __ret_372; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_s32(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \ int32x4_t __ret_373; \ int32x4_t __s0_373 = __p0_373; \ int32x2_t __s2_373 = __p2_373; \ __ret_373 = vsetq_lane_s32(vget_lane_s32(__s2_373, __p3_373), __s0_373, __p1_373); \ __ret_373; \ }) #else #define vcopyq_lane_s32(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \ int32x4_t __ret_374; \ int32x4_t __s0_374 = __p0_374; \ int32x2_t __s2_374 = __p2_374; \ int32x4_t __rev0_374; __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \ int32x2_t __rev2_374; __rev2_374 = __builtin_shufflevector(__s2_374, __s2_374, 1, 0); \ __ret_374 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_374, __p3_374), __rev0_374, __p1_374); \ __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \ __ret_374; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_s64(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \ int64x2_t __ret_375; \ int64x2_t __s0_375 = __p0_375; \ int64x1_t __s2_375 = __p2_375; \ __ret_375 = vsetq_lane_s64(vget_lane_s64(__s2_375, __p3_375), __s0_375, __p1_375); \ __ret_375; \ }) #else #define vcopyq_lane_s64(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \ int64x2_t __ret_376; \ int64x2_t __s0_376 = __p0_376; \ int64x1_t __s2_376 = __p2_376; \ int64x2_t __rev0_376; __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \ __ret_376 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_376, __p3_376), __rev0_376, __p1_376); \ __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \ __ret_376; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_s16(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \ int16x8_t __ret_377; \ int16x8_t __s0_377 = __p0_377; \ int16x4_t __s2_377 = __p2_377; \ __ret_377 = vsetq_lane_s16(vget_lane_s16(__s2_377, __p3_377), __s0_377, __p1_377); \ __ret_377; \ }) #else #define vcopyq_lane_s16(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \ int16x8_t __ret_378; \ int16x8_t __s0_378 = __p0_378; \ int16x4_t __s2_378 = __p2_378; \ int16x8_t __rev0_378; __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_378; __rev2_378 = 
__builtin_shufflevector(__s2_378, __s2_378, 3, 2, 1, 0); \ __ret_378 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_378, __p3_378), __rev0_378, __p1_378); \ __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_378; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_p8(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \ poly8x8_t __ret_379; \ poly8x8_t __s0_379 = __p0_379; \ poly8x8_t __s2_379 = __p2_379; \ __ret_379 = vset_lane_p8(vget_lane_p8(__s2_379, __p3_379), __s0_379, __p1_379); \ __ret_379; \ }) #else #define vcopy_lane_p8(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \ poly8x8_t __ret_380; \ poly8x8_t __s0_380 = __p0_380; \ poly8x8_t __s2_380 = __p2_380; \ poly8x8_t __rev0_380; __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x8_t __rev2_380; __rev2_380 = __builtin_shufflevector(__s2_380, __s2_380, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_380 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_380, __p3_380), __rev0_380, __p1_380); \ __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_380; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_p16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \ poly16x4_t __ret_381; \ poly16x4_t __s0_381 = __p0_381; \ poly16x4_t __s2_381 = __p2_381; \ __ret_381 = vset_lane_p16(vget_lane_p16(__s2_381, __p3_381), __s0_381, __p1_381); \ __ret_381; \ }) #else #define vcopy_lane_p16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \ poly16x4_t __ret_382; \ poly16x4_t __s0_382 = __p0_382; \ poly16x4_t __s2_382 = __p2_382; \ poly16x4_t __rev0_382; __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \ poly16x4_t __rev2_382; __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \ __ret_382 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_382, __p3_382), __rev0_382, __p1_382); \ __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \ __ret_382; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_u8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \ uint8x8_t __ret_383; \ uint8x8_t __s0_383 = __p0_383; \ uint8x8_t __s2_383 = __p2_383; \ __ret_383 = vset_lane_u8(vget_lane_u8(__s2_383, __p3_383), __s0_383, __p1_383); \ __ret_383; \ }) #else #define vcopy_lane_u8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \ uint8x8_t __ret_384; \ uint8x8_t __s0_384 = __p0_384; \ uint8x8_t __s2_384 = __p2_384; \ uint8x8_t __rev0_384; __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev2_384; __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_384 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_384, __p3_384), __rev0_384, __p1_384); \ __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_384; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_u32(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \ uint32x2_t __ret_385; \ uint32x2_t __s0_385 = __p0_385; \ uint32x2_t __s2_385 = __p2_385; \ __ret_385 = vset_lane_u32(vget_lane_u32(__s2_385, __p3_385), __s0_385, __p1_385); \ __ret_385; \ }) #else #define vcopy_lane_u32(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \ uint32x2_t __ret_386; \ uint32x2_t __s0_386 = __p0_386; \ uint32x2_t __s2_386 = __p2_386; \ uint32x2_t __rev0_386; __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 1, 0); \ uint32x2_t __rev2_386; __rev2_386 = 
__builtin_shufflevector(__s2_386, __s2_386, 1, 0); \ __ret_386 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_386, __p3_386), __rev0_386, __p1_386); \ __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \ __ret_386; \ }) #endif #define vcopy_lane_u64(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \ uint64x1_t __ret_387; \ uint64x1_t __s0_387 = __p0_387; \ uint64x1_t __s2_387 = __p2_387; \ __ret_387 = vset_lane_u64(vget_lane_u64(__s2_387, __p3_387), __s0_387, __p1_387); \ __ret_387; \ }) #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_u16(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \ uint16x4_t __ret_388; \ uint16x4_t __s0_388 = __p0_388; \ uint16x4_t __s2_388 = __p2_388; \ __ret_388 = vset_lane_u16(vget_lane_u16(__s2_388, __p3_388), __s0_388, __p1_388); \ __ret_388; \ }) #else #define vcopy_lane_u16(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \ uint16x4_t __ret_389; \ uint16x4_t __s0_389 = __p0_389; \ uint16x4_t __s2_389 = __p2_389; \ uint16x4_t __rev0_389; __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 3, 2, 1, 0); \ uint16x4_t __rev2_389; __rev2_389 = __builtin_shufflevector(__s2_389, __s2_389, 3, 2, 1, 0); \ __ret_389 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_389, __p3_389), __rev0_389, __p1_389); \ __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 3, 2, 1, 0); \ __ret_389; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_s8(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \ int8x8_t __ret_390; \ int8x8_t __s0_390 = __p0_390; \ int8x8_t __s2_390 = __p2_390; \ __ret_390 = vset_lane_s8(vget_lane_s8(__s2_390, __p3_390), __s0_390, __p1_390); \ __ret_390; \ }) #else #define vcopy_lane_s8(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \ int8x8_t __ret_391; \ int8x8_t __s0_391 = __p0_391; \ int8x8_t __s2_391 = __p2_391; \ int8x8_t __rev0_391; __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev2_391; __rev2_391 = __builtin_shufflevector(__s2_391, __s2_391, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_391 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_391, __p3_391), __rev0_391, __p1_391); \ __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_391; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_f32(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \ float32x2_t __ret_392; \ float32x2_t __s0_392 = __p0_392; \ float32x2_t __s2_392 = __p2_392; \ __ret_392 = vset_lane_f32(vget_lane_f32(__s2_392, __p3_392), __s0_392, __p1_392); \ __ret_392; \ }) #else #define vcopy_lane_f32(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \ float32x2_t __ret_393; \ float32x2_t __s0_393 = __p0_393; \ float32x2_t __s2_393 = __p2_393; \ float32x2_t __rev0_393; __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \ float32x2_t __rev2_393; __rev2_393 = __builtin_shufflevector(__s2_393, __s2_393, 1, 0); \ __ret_393 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_393, __p3_393), __rev0_393, __p1_393); \ __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \ __ret_393; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_s32(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \ int32x2_t __ret_394; \ int32x2_t __s0_394 = __p0_394; \ int32x2_t __s2_394 = __p2_394; \ __ret_394 = vset_lane_s32(vget_lane_s32(__s2_394, __p3_394), __s0_394, __p1_394); \ __ret_394; \ }) #else #define vcopy_lane_s32(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \ int32x2_t __ret_395; \ 
int32x2_t __s0_395 = __p0_395; \ int32x2_t __s2_395 = __p2_395; \ int32x2_t __rev0_395; __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 1, 0); \ int32x2_t __rev2_395; __rev2_395 = __builtin_shufflevector(__s2_395, __s2_395, 1, 0); \ __ret_395 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_395, __p3_395), __rev0_395, __p1_395); \ __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 1, 0); \ __ret_395; \ }) #endif #define vcopy_lane_s64(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \ int64x1_t __ret_396; \ int64x1_t __s0_396 = __p0_396; \ int64x1_t __s2_396 = __p2_396; \ __ret_396 = vset_lane_s64(vget_lane_s64(__s2_396, __p3_396), __s0_396, __p1_396); \ __ret_396; \ }) #ifdef __LITTLE_ENDIAN__ #define vcopy_lane_s16(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \ int16x4_t __ret_397; \ int16x4_t __s0_397 = __p0_397; \ int16x4_t __s2_397 = __p2_397; \ __ret_397 = vset_lane_s16(vget_lane_s16(__s2_397, __p3_397), __s0_397, __p1_397); \ __ret_397; \ }) #else #define vcopy_lane_s16(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \ int16x4_t __ret_398; \ int16x4_t __s0_398 = __p0_398; \ int16x4_t __s2_398 = __p2_398; \ int16x4_t __rev0_398; __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \ int16x4_t __rev2_398; __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \ __ret_398 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_398, __p3_398), __rev0_398, __p1_398); \ __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \ __ret_398; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_p8(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \ poly8x16_t __ret_399; \ poly8x16_t __s0_399 = __p0_399; \ poly8x16_t __s2_399 = __p2_399; \ __ret_399 = vsetq_lane_p8(vgetq_lane_p8(__s2_399, __p3_399), __s0_399, __p1_399); \ __ret_399; \ }) #else #define vcopyq_laneq_p8(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \ poly8x16_t __ret_400; \ poly8x16_t __s0_400 = __p0_400; \ poly8x16_t __s2_400 = __p2_400; \ poly8x16_t __rev0_400; __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x16_t __rev2_400; __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_400 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_400, __p3_400), __rev0_400, __p1_400); \ __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_400; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_p16(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \ poly16x8_t __ret_401; \ poly16x8_t __s0_401 = __p0_401; \ poly16x8_t __s2_401 = __p2_401; \ __ret_401 = vsetq_lane_p16(vgetq_lane_p16(__s2_401, __p3_401), __s0_401, __p1_401); \ __ret_401; \ }) #else #define vcopyq_laneq_p16(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \ poly16x8_t __ret_402; \ poly16x8_t __s0_402 = __p0_402; \ poly16x8_t __s2_402 = __p2_402; \ poly16x8_t __rev0_402; __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 7, 6, 5, 4, 3, 2, 1, 0); \ poly16x8_t __rev2_402; __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_402 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_402, __p3_402), __rev0_402, __p1_402); \ __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_402; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_u8(__p0_403, __p1_403, 
__p2_403, __p3_403) __extension__ ({ \ uint8x16_t __ret_403; \ uint8x16_t __s0_403 = __p0_403; \ uint8x16_t __s2_403 = __p2_403; \ __ret_403 = vsetq_lane_u8(vgetq_lane_u8(__s2_403, __p3_403), __s0_403, __p1_403); \ __ret_403; \ }) #else #define vcopyq_laneq_u8(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \ uint8x16_t __ret_404; \ uint8x16_t __s0_404 = __p0_404; \ uint8x16_t __s2_404 = __p2_404; \ uint8x16_t __rev0_404; __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev2_404; __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_404 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_404, __p3_404), __rev0_404, __p1_404); \ __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_404; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_u32(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \ uint32x4_t __ret_405; \ uint32x4_t __s0_405 = __p0_405; \ uint32x4_t __s2_405 = __p2_405; \ __ret_405 = vsetq_lane_u32(vgetq_lane_u32(__s2_405, __p3_405), __s0_405, __p1_405); \ __ret_405; \ }) #else #define vcopyq_laneq_u32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \ uint32x4_t __ret_406; \ uint32x4_t __s0_406 = __p0_406; \ uint32x4_t __s2_406 = __p2_406; \ uint32x4_t __rev0_406; __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 3, 2, 1, 0); \ uint32x4_t __rev2_406; __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 3, 2, 1, 0); \ __ret_406 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_406, __p3_406), __rev0_406, __p1_406); \ __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 3, 2, 1, 0); \ __ret_406; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_u64(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \ uint64x2_t __ret_407; \ uint64x2_t __s0_407 = __p0_407; \ uint64x2_t __s2_407 = __p2_407; \ __ret_407 = vsetq_lane_u64(vgetq_lane_u64(__s2_407, __p3_407), __s0_407, __p1_407); \ __ret_407; \ }) #else #define vcopyq_laneq_u64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \ uint64x2_t __ret_408; \ uint64x2_t __s0_408 = __p0_408; \ uint64x2_t __s2_408 = __p2_408; \ uint64x2_t __rev0_408; __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 1, 0); \ uint64x2_t __rev2_408; __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 1, 0); \ __ret_408 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_408, __p3_408), __rev0_408, __p1_408); \ __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 1, 0); \ __ret_408; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_u16(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \ uint16x8_t __ret_409; \ uint16x8_t __s0_409 = __p0_409; \ uint16x8_t __s2_409 = __p2_409; \ __ret_409 = vsetq_lane_u16(vgetq_lane_u16(__s2_409, __p3_409), __s0_409, __p1_409); \ __ret_409; \ }) #else #define vcopyq_laneq_u16(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \ uint16x8_t __ret_410; \ uint16x8_t __s0_410 = __p0_410; \ uint16x8_t __s2_410 = __p2_410; \ uint16x8_t __rev0_410; __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev2_410; __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_410 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_410, __p3_410), __rev0_410, __p1_410); \ __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 
1, 0); \ __ret_410; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_s8(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \ int8x16_t __ret_411; \ int8x16_t __s0_411 = __p0_411; \ int8x16_t __s2_411 = __p2_411; \ __ret_411 = vsetq_lane_s8(vgetq_lane_s8(__s2_411, __p3_411), __s0_411, __p1_411); \ __ret_411; \ }) #else #define vcopyq_laneq_s8(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \ int8x16_t __ret_412; \ int8x16_t __s0_412 = __p0_412; \ int8x16_t __s2_412 = __p2_412; \ int8x16_t __rev0_412; __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev2_412; __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_412 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_412, __p3_412), __rev0_412, __p1_412); \ __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_412; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \ float32x4_t __ret_413; \ float32x4_t __s0_413 = __p0_413; \ float32x4_t __s2_413 = __p2_413; \ __ret_413 = vsetq_lane_f32(vgetq_lane_f32(__s2_413, __p3_413), __s0_413, __p1_413); \ __ret_413; \ }) #else #define vcopyq_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \ float32x4_t __ret_414; \ float32x4_t __s0_414 = __p0_414; \ float32x4_t __s2_414 = __p2_414; \ float32x4_t __rev0_414; __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 3, 2, 1, 0); \ float32x4_t __rev2_414; __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \ __ret_414 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_414, __p3_414), __rev0_414, __p1_414); \ __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 3, 2, 1, 0); \ __ret_414; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_s32(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \ int32x4_t __ret_415; \ int32x4_t __s0_415 = __p0_415; \ int32x4_t __s2_415 = __p2_415; \ __ret_415 = vsetq_lane_s32(vgetq_lane_s32(__s2_415, __p3_415), __s0_415, __p1_415); \ __ret_415; \ }) #else #define vcopyq_laneq_s32(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \ int32x4_t __ret_416; \ int32x4_t __s0_416 = __p0_416; \ int32x4_t __s2_416 = __p2_416; \ int32x4_t __rev0_416; __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \ int32x4_t __rev2_416; __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 3, 2, 1, 0); \ __ret_416 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_416, __p3_416), __rev0_416, __p1_416); \ __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \ __ret_416; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_s64(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \ int64x2_t __ret_417; \ int64x2_t __s0_417 = __p0_417; \ int64x2_t __s2_417 = __p2_417; \ __ret_417 = vsetq_lane_s64(vgetq_lane_s64(__s2_417, __p3_417), __s0_417, __p1_417); \ __ret_417; \ }) #else #define vcopyq_laneq_s64(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \ int64x2_t __ret_418; \ int64x2_t __s0_418 = __p0_418; \ int64x2_t __s2_418 = __p2_418; \ int64x2_t __rev0_418; __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 1, 0); \ int64x2_t __rev2_418; __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 1, 0); \ __ret_418 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_418, __p3_418), __rev0_418, __p1_418); \ __ret_418 
= __builtin_shufflevector(__ret_418, __ret_418, 1, 0); \ __ret_418; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_s16(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \ int16x8_t __ret_419; \ int16x8_t __s0_419 = __p0_419; \ int16x8_t __s2_419 = __p2_419; \ __ret_419 = vsetq_lane_s16(vgetq_lane_s16(__s2_419, __p3_419), __s0_419, __p1_419); \ __ret_419; \ }) #else #define vcopyq_laneq_s16(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \ int16x8_t __ret_420; \ int16x8_t __s0_420 = __p0_420; \ int16x8_t __s2_420 = __p2_420; \ int16x8_t __rev0_420; __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_420; __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_420 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_420, __p3_420), __rev0_420, __p1_420); \ __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_420; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_p8(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \ poly8x8_t __ret_421; \ poly8x8_t __s0_421 = __p0_421; \ poly8x16_t __s2_421 = __p2_421; \ __ret_421 = vset_lane_p8(vgetq_lane_p8(__s2_421, __p3_421), __s0_421, __p1_421); \ __ret_421; \ }) #else #define vcopy_laneq_p8(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \ poly8x8_t __ret_422; \ poly8x8_t __s0_422 = __p0_422; \ poly8x16_t __s2_422 = __p2_422; \ poly8x8_t __rev0_422; __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x16_t __rev2_422; __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_422 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_422, __p3_422), __rev0_422, __p1_422); \ __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_422; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_p16(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \ poly16x4_t __ret_423; \ poly16x4_t __s0_423 = __p0_423; \ poly16x8_t __s2_423 = __p2_423; \ __ret_423 = vset_lane_p16(vgetq_lane_p16(__s2_423, __p3_423), __s0_423, __p1_423); \ __ret_423; \ }) #else #define vcopy_laneq_p16(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \ poly16x4_t __ret_424; \ poly16x4_t __s0_424 = __p0_424; \ poly16x8_t __s2_424 = __p2_424; \ poly16x4_t __rev0_424; __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \ poly16x8_t __rev2_424; __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_424 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_424, __p3_424), __rev0_424, __p1_424); \ __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \ __ret_424; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_u8(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \ uint8x8_t __ret_425; \ uint8x8_t __s0_425 = __p0_425; \ uint8x16_t __s2_425 = __p2_425; \ __ret_425 = vset_lane_u8(vgetq_lane_u8(__s2_425, __p3_425), __s0_425, __p1_425); \ __ret_425; \ }) #else #define vcopy_laneq_u8(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ uint8x8_t __ret_426; \ uint8x8_t __s0_426 = __p0_426; \ uint8x16_t __s2_426 = __p2_426; \ uint8x8_t __rev0_426; __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev2_426; __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_426 = 
__noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_426, __p3_426), __rev0_426, __p1_426); \ __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_426; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ uint32x2_t __ret_427; \ uint32x2_t __s0_427 = __p0_427; \ uint32x4_t __s2_427 = __p2_427; \ __ret_427 = vset_lane_u32(vgetq_lane_u32(__s2_427, __p3_427), __s0_427, __p1_427); \ __ret_427; \ }) #else #define vcopy_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ uint32x2_t __ret_428; \ uint32x2_t __s0_428 = __p0_428; \ uint32x4_t __s2_428 = __p2_428; \ uint32x2_t __rev0_428; __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 1, 0); \ uint32x4_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \ __ret_428 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_428, __p3_428), __rev0_428, __p1_428); \ __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 1, 0); \ __ret_428; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_u64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ uint64x1_t __ret_429; \ uint64x1_t __s0_429 = __p0_429; \ uint64x2_t __s2_429 = __p2_429; \ __ret_429 = vset_lane_u64(vgetq_lane_u64(__s2_429, __p3_429), __s0_429, __p1_429); \ __ret_429; \ }) #else #define vcopy_laneq_u64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ uint64x1_t __ret_430; \ uint64x1_t __s0_430 = __p0_430; \ uint64x2_t __s2_430 = __p2_430; \ uint64x2_t __rev2_430; __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 1, 0); \ __ret_430 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_430, __p3_430), __s0_430, __p1_430); \ __ret_430; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_u16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ uint16x4_t __ret_431; \ uint16x4_t __s0_431 = __p0_431; \ uint16x8_t __s2_431 = __p2_431; \ __ret_431 = vset_lane_u16(vgetq_lane_u16(__s2_431, __p3_431), __s0_431, __p1_431); \ __ret_431; \ }) #else #define vcopy_laneq_u16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ uint16x4_t __ret_432; \ uint16x4_t __s0_432 = __p0_432; \ uint16x8_t __s2_432 = __p2_432; \ uint16x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ uint16x8_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_432 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_432, __p3_432), __rev0_432, __p1_432); \ __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ __ret_432; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_s8(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ int8x8_t __ret_433; \ int8x8_t __s0_433 = __p0_433; \ int8x16_t __s2_433 = __p2_433; \ __ret_433 = vset_lane_s8(vgetq_lane_s8(__s2_433, __p3_433), __s0_433, __p1_433); \ __ret_433; \ }) #else #define vcopy_laneq_s8(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ int8x8_t __ret_434; \ int8x8_t __s0_434 = __p0_434; \ int8x16_t __s2_434 = __p2_434; \ int8x8_t __rev0_434; __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev2_434; __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_434 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_434, __p3_434), __rev0_434, __p1_434); \ __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_434; \ 
}) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ float32x2_t __ret_435; \ float32x2_t __s0_435 = __p0_435; \ float32x4_t __s2_435 = __p2_435; \ __ret_435 = vset_lane_f32(vgetq_lane_f32(__s2_435, __p3_435), __s0_435, __p1_435); \ __ret_435; \ }) #else #define vcopy_laneq_f32(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ float32x2_t __ret_436; \ float32x2_t __s0_436 = __p0_436; \ float32x4_t __s2_436 = __p2_436; \ float32x2_t __rev0_436; __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 1, 0); \ float32x4_t __rev2_436; __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 3, 2, 1, 0); \ __ret_436 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_436, __p3_436), __rev0_436, __p1_436); \ __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 1, 0); \ __ret_436; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_s32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \ int32x2_t __ret_437; \ int32x2_t __s0_437 = __p0_437; \ int32x4_t __s2_437 = __p2_437; \ __ret_437 = vset_lane_s32(vgetq_lane_s32(__s2_437, __p3_437), __s0_437, __p1_437); \ __ret_437; \ }) #else #define vcopy_laneq_s32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \ int32x2_t __ret_438; \ int32x2_t __s0_438 = __p0_438; \ int32x4_t __s2_438 = __p2_438; \ int32x2_t __rev0_438; __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \ int32x4_t __rev2_438; __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \ __ret_438 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_438, __p3_438), __rev0_438, __p1_438); \ __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \ __ret_438; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_s64(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ int64x1_t __ret_439; \ int64x1_t __s0_439 = __p0_439; \ int64x2_t __s2_439 = __p2_439; \ __ret_439 = vset_lane_s64(vgetq_lane_s64(__s2_439, __p3_439), __s0_439, __p1_439); \ __ret_439; \ }) #else #define vcopy_laneq_s64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \ int64x1_t __ret_440; \ int64x1_t __s0_440 = __p0_440; \ int64x2_t __s2_440 = __p2_440; \ int64x2_t __rev2_440; __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 1, 0); \ __ret_440 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_440, __p3_440), __s0_440, __p1_440); \ __ret_440; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \ int16x4_t __ret_441; \ int16x4_t __s0_441 = __p0_441; \ int16x8_t __s2_441 = __p2_441; \ __ret_441 = vset_lane_s16(vgetq_lane_s16(__s2_441, __p3_441), __s0_441, __p1_441); \ __ret_441; \ }) #else #define vcopy_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ int16x4_t __ret_442; \ int16x4_t __s0_442 = __p0_442; \ int16x8_t __s2_442 = __p2_442; \ int16x4_t __rev0_442; __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \ int16x8_t __rev2_442; __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_442 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_442, __p3_442), __rev0_442, __p1_442); \ __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \ __ret_442; \ }) #endif #define vcreate_p64(__p0) __extension__ ({ \ poly64x1_t __ret; \ uint64_t __promote = __p0; \ __ret = (poly64x1_t)(__promote); \ __ret; \ }) #define vcreate_f64(__p0) __extension__ ({ \ float64x1_t __ret; \ uint64_t __promote = __p0; \ __ret = 
(float64x1_t)(__promote); \ __ret; \ }) __ai float32_t vcvts_f32_s32(int32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); return __ret; } __ai float32_t vcvts_f32_u32(uint32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); return __ret; } #else __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { float32x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); return __ret; } #endif __ai float64_t vcvtd_f64_s64(int64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); return __ret; } __ai float64_t vcvtd_f64_u64(uint64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51); return __ret; } #else __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { float64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35); return __ret; } #else __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { float64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); return __ret; } __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); return __ret; } #else __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { float64x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { float16x8_t __ret; __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1)); return __ret; } #else __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { float16x8_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); 
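/* Big-endian variant: the operands are lane-reversed into the little-endian order the NEON builtins expect, the __noswap_ helpers do the actual work, and the result is reversed back before returning; this reverse / __noswap_ / reverse pattern recurs throughout the #else branches of this header. */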
__ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { float32x4_t __ret; __ret = vcvt_f32_f16(vget_high_f16(__p0)); return __ret; } #else __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { float32x4_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { float32x4_t __ret; __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1)); return __ret; } #else __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { float32x4_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { float64x2_t __ret; __ret = vcvt_f64_f32(vget_high_f32(__p0)); return __ret; } #else __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { float64x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ uint32_t __s0 = __p0; \ __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ __ret; \ }) #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ int32_t __s0 = __p0; \ __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ float64x1_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \ float64x1_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #define vcvtd_n_f64_u64(__p0, __p1) 
__extension__ ({ \ float64_t __ret; \ uint64_t __s0 = __p0; \ __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ __ret; \ }) #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ float64_t __ret; \ int64_t __s0 = __p0; \ __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ __ret; \ }) #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ float32_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ float64x2_t __s0 = __p0; \ __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ float64x1_t __s0 = __p0; \ __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ float64_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ __ret; \ }) #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ float32_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ float64x2_t __s0 = __p0; \ __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ float64x1_t __s0 = __p0; \ __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ float64_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ __ret; \ }) __ai int32_t vcvts_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); return __ret; } __ai int64_t vcvtd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); return __ret; } __ai uint32_t vcvts_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); return __ret; } __ai uint64_t vcvtd_u64_f64(float64_t __p0) { 
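/* Scalar float-to-integer conversion; like the other unsuffixed vcvt conversions it rounds toward zero. The vcvta, vcvtm, vcvtn and vcvtp scalar variants that follow round to nearest with ties away from zero, toward minus infinity, to nearest even, and toward plus infinity, respectively. */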
uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); return __ret; } #else __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); return __ret; } __ai int32_t vcvtas_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); return __ret; } __ai int64_t vcvtad_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); return __ret; } __ai uint32_t vcvtas_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); return __ret; } __ai uint64_t vcvtad_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); return __ret; } __ai int32_t vcvtms_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); return __ret; } __ai int64_t vcvtmd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); return __ret; } __ai uint32_t vcvtms_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); return __ret; } __ai uint64_t vcvtmd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); return __ret; } __ai int32_t vcvtns_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); return __ret; } __ai int64_t vcvtnd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); return __ret; } __ai uint32_t vcvtns_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); return __ret; } __ai uint64_t vcvtnd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); return __ret; } __ai int32_t vcvtps_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); return __ret; } __ai int64_t vcvtpd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); return __ret; } __ai uint32_t vcvtps_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); return __ret; } __ai uint64_t vcvtpd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); return __ret; } __ai float32_t vcvtxd_f32_f64(float64_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); return __ret; } #else __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { float32x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { float32x2_t __ret; __ret = 
(float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { float32x4_t __ret; __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); return __ret; } #else __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { float32x4_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __p0 / __p1; return __ret; } #else __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 / __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __p0 / __p1; return __ret; } #else __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 / __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = __p0 / __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __p0 / __p1; return __ret; } #else __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 / __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x8_t __s0 = __p0; \ __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \ __ret; \ }) #else #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x8_t __s0 = __p0; \ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vduph_lane_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x4_t __s0 = __p0; \ __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \ __ret; \ }) #else #define vduph_lane_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x4_t __s0 = __p0; \ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x8_t __s0 = __p0; \ __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #else #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 
= __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdups_lane_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x2_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #else #define vdups_lane_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ __ret; \ }) #endif #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64x1_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vduph_lane_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x4_t __s0 = __p0; \ __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #else #define vduph_lane_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x8_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #else #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ __ret; \ }) #endif #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ float64_t __ret; \ float64x1_t __s0 = __p0; \ __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vdups_lane_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x2_t __s0 = __p0; \ __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \ __ret; \ }) #else #define vdups_lane_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdups_lane_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x2_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #else #define vdups_lane_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ __ret; \ }) #endif #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64x1_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vduph_lane_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x4_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #else #define vduph_lane_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int16_t) 
__builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ __ret; \ }) #endif #define vdup_lane_p64(__p0_443, __p1_443) __extension__ ({ \ poly64x1_t __ret_443; \ poly64x1_t __s0_443 = __p0_443; \ __ret_443 = splat_lane_p64(__s0_443, __p1_443); \ __ret_443; \ }) #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_p64(__p0_444, __p1_444) __extension__ ({ \ poly64x2_t __ret_444; \ poly64x1_t __s0_444 = __p0_444; \ __ret_444 = splatq_lane_p64(__s0_444, __p1_444); \ __ret_444; \ }) #else #define vdupq_lane_p64(__p0_445, __p1_445) __extension__ ({ \ poly64x2_t __ret_445; \ poly64x1_t __s0_445 = __p0_445; \ __ret_445 = __noswap_splatq_lane_p64(__s0_445, __p1_445); \ __ret_445 = __builtin_shufflevector(__ret_445, __ret_445, 1, 0); \ __ret_445; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_f64(__p0_446, __p1_446) __extension__ ({ \ float64x2_t __ret_446; \ float64x1_t __s0_446 = __p0_446; \ __ret_446 = splatq_lane_f64(__s0_446, __p1_446); \ __ret_446; \ }) #else #define vdupq_lane_f64(__p0_447, __p1_447) __extension__ ({ \ float64x2_t __ret_447; \ float64x1_t __s0_447 = __p0_447; \ __ret_447 = __noswap_splatq_lane_f64(__s0_447, __p1_447); \ __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \ __ret_447; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_lane_f16(__p0_448, __p1_448) __extension__ ({ \ float16x8_t __ret_448; \ float16x4_t __s0_448 = __p0_448; \ __ret_448 = splatq_lane_f16(__s0_448, __p1_448); \ __ret_448; \ }) #else #define vdupq_lane_f16(__p0_449, __p1_449) __extension__ ({ \ float16x8_t __ret_449; \ float16x4_t __s0_449 = __p0_449; \ float16x4_t __rev0_449; __rev0_449 = __builtin_shufflevector(__s0_449, __s0_449, 3, 2, 1, 0); \ __ret_449 = __noswap_splatq_lane_f16(__rev0_449, __p1_449); \ __ret_449 = __builtin_shufflevector(__ret_449, __ret_449, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_449; \ }) #endif #define vdup_lane_f64(__p0_450, __p1_450) __extension__ ({ \ float64x1_t __ret_450; \ float64x1_t __s0_450 = __p0_450; \ __ret_450 = splat_lane_f64(__s0_450, __p1_450); \ __ret_450; \ }) #ifdef __LITTLE_ENDIAN__ #define vdup_lane_f16(__p0_451, __p1_451) __extension__ ({ \ float16x4_t __ret_451; \ float16x4_t __s0_451 = __p0_451; \ __ret_451 = splat_lane_f16(__s0_451, __p1_451); \ __ret_451; \ }) #else #define vdup_lane_f16(__p0_452, __p1_452) __extension__ ({ \ float16x4_t __ret_452; \ float16x4_t __s0_452 = __p0_452; \ float16x4_t __rev0_452; __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \ __ret_452 = __noswap_splat_lane_f16(__rev0_452, __p1_452); \ __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \ __ret_452; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x16_t __s0 = __p0; \ __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \ __ret; \ }) #else #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ poly8x16_t __s0 = __p0; \ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x8_t __s0 = __p0; \ __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \ __ret; \ }) #else #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ poly16_t __ret; \ poly16x8_t __s0 = __p0; \ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 
2, 1, 0); \ __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x16_t __s0 = __p0; \ __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #else #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x4_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #else #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64x2_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #else #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x8_t __s0 = __p0; \ __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #else #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x16_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #else #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ float64_t __ret; \ float64x2_t __s0 = __p0; \ __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \ __ret; \ }) #else #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ float64_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x4_t __s0 = __p0; \ __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \ __ret; \ }) #else #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = 
(float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x4_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #else #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64x2_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #else #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x8_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #else #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_p8(__p0_453, __p1_453) __extension__ ({ \ poly8x8_t __ret_453; \ poly8x16_t __s0_453 = __p0_453; \ __ret_453 = splat_laneq_p8(__s0_453, __p1_453); \ __ret_453; \ }) #else #define vdup_laneq_p8(__p0_454, __p1_454) __extension__ ({ \ poly8x8_t __ret_454; \ poly8x16_t __s0_454 = __p0_454; \ poly8x16_t __rev0_454; __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_454 = __noswap_splat_laneq_p8(__rev0_454, __p1_454); \ __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_454; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_p64(__p0_455, __p1_455) __extension__ ({ \ poly64x1_t __ret_455; \ poly64x2_t __s0_455 = __p0_455; \ __ret_455 = splat_laneq_p64(__s0_455, __p1_455); \ __ret_455; \ }) #else #define vdup_laneq_p64(__p0_456, __p1_456) __extension__ ({ \ poly64x1_t __ret_456; \ poly64x2_t __s0_456 = __p0_456; \ poly64x2_t __rev0_456; __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \ __ret_456 = __noswap_splat_laneq_p64(__rev0_456, __p1_456); \ __ret_456; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_p16(__p0_457, __p1_457) __extension__ ({ \ poly16x4_t __ret_457; \ poly16x8_t __s0_457 = __p0_457; \ __ret_457 = splat_laneq_p16(__s0_457, __p1_457); \ __ret_457; \ }) #else #define vdup_laneq_p16(__p0_458, __p1_458) __extension__ ({ \ poly16x4_t __ret_458; \ poly16x8_t __s0_458 = __p0_458; \ poly16x8_t __rev0_458; __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_458 = __noswap_splat_laneq_p16(__rev0_458, __p1_458); \ __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \ __ret_458; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_p8(__p0_459, __p1_459) __extension__ ({ \ poly8x16_t __ret_459; \ poly8x16_t __s0_459 = __p0_459; \ __ret_459 = splatq_laneq_p8(__s0_459, __p1_459); \ __ret_459; \ }) #else #define 
vdupq_laneq_p8(__p0_460, __p1_460) __extension__ ({ \ poly8x16_t __ret_460; \ poly8x16_t __s0_460 = __p0_460; \ poly8x16_t __rev0_460; __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_460 = __noswap_splatq_laneq_p8(__rev0_460, __p1_460); \ __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_460; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_p64(__p0_461, __p1_461) __extension__ ({ \ poly64x2_t __ret_461; \ poly64x2_t __s0_461 = __p0_461; \ __ret_461 = splatq_laneq_p64(__s0_461, __p1_461); \ __ret_461; \ }) #else #define vdupq_laneq_p64(__p0_462, __p1_462) __extension__ ({ \ poly64x2_t __ret_462; \ poly64x2_t __s0_462 = __p0_462; \ poly64x2_t __rev0_462; __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 1, 0); \ __ret_462 = __noswap_splatq_laneq_p64(__rev0_462, __p1_462); \ __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 1, 0); \ __ret_462; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_p16(__p0_463, __p1_463) __extension__ ({ \ poly16x8_t __ret_463; \ poly16x8_t __s0_463 = __p0_463; \ __ret_463 = splatq_laneq_p16(__s0_463, __p1_463); \ __ret_463; \ }) #else #define vdupq_laneq_p16(__p0_464, __p1_464) __extension__ ({ \ poly16x8_t __ret_464; \ poly16x8_t __s0_464 = __p0_464; \ poly16x8_t __rev0_464; __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_464 = __noswap_splatq_laneq_p16(__rev0_464, __p1_464); \ __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_464; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_u8(__p0_465, __p1_465) __extension__ ({ \ uint8x16_t __ret_465; \ uint8x16_t __s0_465 = __p0_465; \ __ret_465 = splatq_laneq_u8(__s0_465, __p1_465); \ __ret_465; \ }) #else #define vdupq_laneq_u8(__p0_466, __p1_466) __extension__ ({ \ uint8x16_t __ret_466; \ uint8x16_t __s0_466 = __p0_466; \ uint8x16_t __rev0_466; __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_466 = __noswap_splatq_laneq_u8(__rev0_466, __p1_466); \ __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_466; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_u32(__p0_467, __p1_467) __extension__ ({ \ uint32x4_t __ret_467; \ uint32x4_t __s0_467 = __p0_467; \ __ret_467 = splatq_laneq_u32(__s0_467, __p1_467); \ __ret_467; \ }) #else #define vdupq_laneq_u32(__p0_468, __p1_468) __extension__ ({ \ uint32x4_t __ret_468; \ uint32x4_t __s0_468 = __p0_468; \ uint32x4_t __rev0_468; __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 3, 2, 1, 0); \ __ret_468 = __noswap_splatq_laneq_u32(__rev0_468, __p1_468); \ __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 3, 2, 1, 0); \ __ret_468; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_u64(__p0_469, __p1_469) __extension__ ({ \ uint64x2_t __ret_469; \ uint64x2_t __s0_469 = __p0_469; \ __ret_469 = splatq_laneq_u64(__s0_469, __p1_469); \ __ret_469; \ }) #else #define vdupq_laneq_u64(__p0_470, __p1_470) __extension__ ({ \ uint64x2_t __ret_470; \ uint64x2_t __s0_470 = __p0_470; \ uint64x2_t __rev0_470; __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 1, 0); \ __ret_470 = __noswap_splatq_laneq_u64(__rev0_470, __p1_470); \ __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 1, 0); \ __ret_470; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define 
vdupq_laneq_u16(__p0_471, __p1_471) __extension__ ({ \ uint16x8_t __ret_471; \ uint16x8_t __s0_471 = __p0_471; \ __ret_471 = splatq_laneq_u16(__s0_471, __p1_471); \ __ret_471; \ }) #else #define vdupq_laneq_u16(__p0_472, __p1_472) __extension__ ({ \ uint16x8_t __ret_472; \ uint16x8_t __s0_472 = __p0_472; \ uint16x8_t __rev0_472; __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_472 = __noswap_splatq_laneq_u16(__rev0_472, __p1_472); \ __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_472; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_s8(__p0_473, __p1_473) __extension__ ({ \ int8x16_t __ret_473; \ int8x16_t __s0_473 = __p0_473; \ __ret_473 = splatq_laneq_s8(__s0_473, __p1_473); \ __ret_473; \ }) #else #define vdupq_laneq_s8(__p0_474, __p1_474) __extension__ ({ \ int8x16_t __ret_474; \ int8x16_t __s0_474 = __p0_474; \ int8x16_t __rev0_474; __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_474 = __noswap_splatq_laneq_s8(__rev0_474, __p1_474); \ __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_474; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_f64(__p0_475, __p1_475) __extension__ ({ \ float64x2_t __ret_475; \ float64x2_t __s0_475 = __p0_475; \ __ret_475 = splatq_laneq_f64(__s0_475, __p1_475); \ __ret_475; \ }) #else #define vdupq_laneq_f64(__p0_476, __p1_476) __extension__ ({ \ float64x2_t __ret_476; \ float64x2_t __s0_476 = __p0_476; \ float64x2_t __rev0_476; __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \ __ret_476 = __noswap_splatq_laneq_f64(__rev0_476, __p1_476); \ __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 1, 0); \ __ret_476; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_f32(__p0_477, __p1_477) __extension__ ({ \ float32x4_t __ret_477; \ float32x4_t __s0_477 = __p0_477; \ __ret_477 = splatq_laneq_f32(__s0_477, __p1_477); \ __ret_477; \ }) #else #define vdupq_laneq_f32(__p0_478, __p1_478) __extension__ ({ \ float32x4_t __ret_478; \ float32x4_t __s0_478 = __p0_478; \ float32x4_t __rev0_478; __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \ __ret_478 = __noswap_splatq_laneq_f32(__rev0_478, __p1_478); \ __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \ __ret_478; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_f16(__p0_479, __p1_479) __extension__ ({ \ float16x8_t __ret_479; \ float16x8_t __s0_479 = __p0_479; \ __ret_479 = splatq_laneq_f16(__s0_479, __p1_479); \ __ret_479; \ }) #else #define vdupq_laneq_f16(__p0_480, __p1_480) __extension__ ({ \ float16x8_t __ret_480; \ float16x8_t __s0_480 = __p0_480; \ float16x8_t __rev0_480; __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_480 = __noswap_splatq_laneq_f16(__rev0_480, __p1_480); \ __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_480; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_s32(__p0_481, __p1_481) __extension__ ({ \ int32x4_t __ret_481; \ int32x4_t __s0_481 = __p0_481; \ __ret_481 = splatq_laneq_s32(__s0_481, __p1_481); \ __ret_481; \ }) #else #define vdupq_laneq_s32(__p0_482, __p1_482) __extension__ ({ \ int32x4_t __ret_482; \ int32x4_t __s0_482 = __p0_482; \ int32x4_t __rev0_482; __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 3, 2, 1, 0); \ __ret_482 = __noswap_splatq_laneq_s32(__rev0_482, 
__p1_482); \ __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 3, 2, 1, 0); \ __ret_482; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_s64(__p0_483, __p1_483) __extension__ ({ \ int64x2_t __ret_483; \ int64x2_t __s0_483 = __p0_483; \ __ret_483 = splatq_laneq_s64(__s0_483, __p1_483); \ __ret_483; \ }) #else #define vdupq_laneq_s64(__p0_484, __p1_484) __extension__ ({ \ int64x2_t __ret_484; \ int64x2_t __s0_484 = __p0_484; \ int64x2_t __rev0_484; __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 1, 0); \ __ret_484 = __noswap_splatq_laneq_s64(__rev0_484, __p1_484); \ __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \ __ret_484; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdupq_laneq_s16(__p0_485, __p1_485) __extension__ ({ \ int16x8_t __ret_485; \ int16x8_t __s0_485 = __p0_485; \ __ret_485 = splatq_laneq_s16(__s0_485, __p1_485); \ __ret_485; \ }) #else #define vdupq_laneq_s16(__p0_486, __p1_486) __extension__ ({ \ int16x8_t __ret_486; \ int16x8_t __s0_486 = __p0_486; \ int16x8_t __rev0_486; __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_486 = __noswap_splatq_laneq_s16(__rev0_486, __p1_486); \ __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_486; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_u8(__p0_487, __p1_487) __extension__ ({ \ uint8x8_t __ret_487; \ uint8x16_t __s0_487 = __p0_487; \ __ret_487 = splat_laneq_u8(__s0_487, __p1_487); \ __ret_487; \ }) #else #define vdup_laneq_u8(__p0_488, __p1_488) __extension__ ({ \ uint8x8_t __ret_488; \ uint8x16_t __s0_488 = __p0_488; \ uint8x16_t __rev0_488; __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_488 = __noswap_splat_laneq_u8(__rev0_488, __p1_488); \ __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_488; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_u32(__p0_489, __p1_489) __extension__ ({ \ uint32x2_t __ret_489; \ uint32x4_t __s0_489 = __p0_489; \ __ret_489 = splat_laneq_u32(__s0_489, __p1_489); \ __ret_489; \ }) #else #define vdup_laneq_u32(__p0_490, __p1_490) __extension__ ({ \ uint32x2_t __ret_490; \ uint32x4_t __s0_490 = __p0_490; \ uint32x4_t __rev0_490; __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \ __ret_490 = __noswap_splat_laneq_u32(__rev0_490, __p1_490); \ __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 1, 0); \ __ret_490; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_u64(__p0_491, __p1_491) __extension__ ({ \ uint64x1_t __ret_491; \ uint64x2_t __s0_491 = __p0_491; \ __ret_491 = splat_laneq_u64(__s0_491, __p1_491); \ __ret_491; \ }) #else #define vdup_laneq_u64(__p0_492, __p1_492) __extension__ ({ \ uint64x1_t __ret_492; \ uint64x2_t __s0_492 = __p0_492; \ uint64x2_t __rev0_492; __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \ __ret_492 = __noswap_splat_laneq_u64(__rev0_492, __p1_492); \ __ret_492; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_u16(__p0_493, __p1_493) __extension__ ({ \ uint16x4_t __ret_493; \ uint16x8_t __s0_493 = __p0_493; \ __ret_493 = splat_laneq_u16(__s0_493, __p1_493); \ __ret_493; \ }) #else #define vdup_laneq_u16(__p0_494, __p1_494) __extension__ ({ \ uint16x4_t __ret_494; \ uint16x8_t __s0_494 = __p0_494; \ uint16x8_t __rev0_494; __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_494 = __noswap_splat_laneq_u16(__rev0_494, __p1_494); \ __ret_494 
= __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \ __ret_494; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_s8(__p0_495, __p1_495) __extension__ ({ \ int8x8_t __ret_495; \ int8x16_t __s0_495 = __p0_495; \ __ret_495 = splat_laneq_s8(__s0_495, __p1_495); \ __ret_495; \ }) #else #define vdup_laneq_s8(__p0_496, __p1_496) __extension__ ({ \ int8x8_t __ret_496; \ int8x16_t __s0_496 = __p0_496; \ int8x16_t __rev0_496; __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_496 = __noswap_splat_laneq_s8(__rev0_496, __p1_496); \ __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_496; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_f64(__p0_497, __p1_497) __extension__ ({ \ float64x1_t __ret_497; \ float64x2_t __s0_497 = __p0_497; \ __ret_497 = splat_laneq_f64(__s0_497, __p1_497); \ __ret_497; \ }) #else #define vdup_laneq_f64(__p0_498, __p1_498) __extension__ ({ \ float64x1_t __ret_498; \ float64x2_t __s0_498 = __p0_498; \ float64x2_t __rev0_498; __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 1, 0); \ __ret_498 = __noswap_splat_laneq_f64(__rev0_498, __p1_498); \ __ret_498; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_f32(__p0_499, __p1_499) __extension__ ({ \ float32x2_t __ret_499; \ float32x4_t __s0_499 = __p0_499; \ __ret_499 = splat_laneq_f32(__s0_499, __p1_499); \ __ret_499; \ }) #else #define vdup_laneq_f32(__p0_500, __p1_500) __extension__ ({ \ float32x2_t __ret_500; \ float32x4_t __s0_500 = __p0_500; \ float32x4_t __rev0_500; __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 3, 2, 1, 0); \ __ret_500 = __noswap_splat_laneq_f32(__rev0_500, __p1_500); \ __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \ __ret_500; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_f16(__p0_501, __p1_501) __extension__ ({ \ float16x4_t __ret_501; \ float16x8_t __s0_501 = __p0_501; \ __ret_501 = splat_laneq_f16(__s0_501, __p1_501); \ __ret_501; \ }) #else #define vdup_laneq_f16(__p0_502, __p1_502) __extension__ ({ \ float16x4_t __ret_502; \ float16x8_t __s0_502 = __p0_502; \ float16x8_t __rev0_502; __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_502 = __noswap_splat_laneq_f16(__rev0_502, __p1_502); \ __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \ __ret_502; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_s32(__p0_503, __p1_503) __extension__ ({ \ int32x2_t __ret_503; \ int32x4_t __s0_503 = __p0_503; \ __ret_503 = splat_laneq_s32(__s0_503, __p1_503); \ __ret_503; \ }) #else #define vdup_laneq_s32(__p0_504, __p1_504) __extension__ ({ \ int32x2_t __ret_504; \ int32x4_t __s0_504 = __p0_504; \ int32x4_t __rev0_504; __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 3, 2, 1, 0); \ __ret_504 = __noswap_splat_laneq_s32(__rev0_504, __p1_504); \ __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \ __ret_504; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vdup_laneq_s64(__p0_505, __p1_505) __extension__ ({ \ int64x1_t __ret_505; \ int64x2_t __s0_505 = __p0_505; \ __ret_505 = splat_laneq_s64(__s0_505, __p1_505); \ __ret_505; \ }) #else #define vdup_laneq_s64(__p0_506, __p1_506) __extension__ ({ \ int64x1_t __ret_506; \ int64x2_t __s0_506 = __p0_506; \ int64x2_t __rev0_506; __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 1, 0); \ __ret_506 = __noswap_splat_laneq_s64(__rev0_506, __p1_506); \ __ret_506; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define 
vdup_laneq_s16(__p0_507, __p1_507) __extension__ ({ \ int16x4_t __ret_507; \ int16x8_t __s0_507 = __p0_507; \ __ret_507 = splat_laneq_s16(__s0_507, __p1_507); \ __ret_507; \ }) #else #define vdup_laneq_s16(__p0_508, __p1_508) __extension__ ({ \ int16x4_t __ret_508; \ int16x8_t __s0_508 = __p0_508; \ int16x8_t __rev0_508; __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_508 = __noswap_splat_laneq_s16(__rev0_508, __p1_508); \ __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 3, 2, 1, 0); \ __ret_508; \ }) #endif __ai poly64x1_t vdup_n_p64(poly64_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vdupq_n_p64(poly64_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t) {__p0, __p0}; return __ret; } #else __ai poly64x2_t vdupq_n_p64(poly64_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vdupq_n_f64(float64_t __p0) { float64x2_t __ret; __ret = (float64x2_t) {__p0, __p0}; return __ret; } #else __ai float64x2_t vdupq_n_f64(float64_t __p0) { float64x2_t __ret; __ret = (float64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vdup_n_f64(float64_t __p0) { float64x1_t __ret; __ret = (float64x1_t) {__p0}; return __ret; } #define vext_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1_t __ret; \ poly64x1_t __s0 = __p0; \ poly64x1_t __s1 = __p1; \ __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ __ret; \ }) #else #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \ __ret; \ }) #else #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vext_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ float64x1_t __s1 = __p1; \ __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else __ai 
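/* vfmaq_f64 computes __p0 + (__p1 * __p2) as a single fused multiply-add with no intermediate rounding; vfmsq_f64 and vfms_f64 below reuse it with __p1 negated to compute __p0 - (__p1 * __p2). */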
float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64_t __ret; \ float64_t __s0 = __p0; \ float64_t __s1 = __p1; \ float64x1_t __s2 = __p2; \ __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32_t __ret; \ float32_t __s0 = __p0; \ float32_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ __ret; \ }) #else #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32_t __ret; \ float32_t __s0 = __p0; \ float32_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \ __ret; \ }) #define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32_t __ret; \ float32_t __s0 = __p0; \ float32_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x1_t __s2 = __p2; \ __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ __ret; \ }) #else #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x1_t __s2 = __p2; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x1_t __s2 = __p2; \ __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); 
\ __ret; \ }) #else #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ __ret; \ }) #endif #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ float64x1_t __s1 = __p1; \ float64x1_t __s2 = __p2; \ __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ __ret; \ }) #else #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ float32x2_t __s2 = __p2; \ __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64_t __ret; \ float64_t __s0 = __p0; \ float64_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ __ret; \ }) #else #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64_t __ret; \ float64_t __s0 = __p0; \ float64_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \ __ret; \ }) #define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64_t __ret; \ float64_t __s0 = __p0; \ float64_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32_t __ret; \ float32_t __s0 = __p0; \ float32_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ __ret = (float32_t) 
__builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ __ret; \ }) #else #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32_t __ret; \ float32_t __s0 = __p0; \ float32_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \ __ret; \ }) #define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32_t __ret; \ float32_t __s0 = __p0; \ float32_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ __ret; \ }) #else #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ __ret; \ }) #else #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x4_t __ret; \ float32x4_t __s0 = __p0; \ float32x4_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ float64x1_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ __ret; \ }) #else #define vfma_laneq_f64(__p0, __p1, __p2, __p3) 
__extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ float64x1_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \ __ret; \ }) #define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ float64x1_t __s1 = __p1; \ float64x2_t __s2 = __p2; \ __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ __ret; \ }) #else #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ float32x2_t __ret; \ float32x2_t __s0 = __p0; \ float32x2_t __s1 = __p1; \ float32x4_t __s2 = __p2; \ __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); return __ret; } #else __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { float64x1_t __ret; __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2}); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = vfmaq_f64(__p0, -__p1, __p2); return __ret; } #else __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = vfma_f64(__p0, -__p1, __p2); return __ret; } #define vfmsd_lane_f64(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \ float64_t __ret_509; \ float64_t __s0_509 = __p0_509; \ float64_t __s1_509 
= __p1_509; \ float64x1_t __s2_509 = __p2_509; \ __ret_509 = vfmad_lane_f64(__s0_509, -__s1_509, __s2_509, __p3_509); \ __ret_509; \ }) #ifdef __LITTLE_ENDIAN__ #define vfmss_lane_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \ float32_t __ret_510; \ float32_t __s0_510 = __p0_510; \ float32_t __s1_510 = __p1_510; \ float32x2_t __s2_510 = __p2_510; \ __ret_510 = vfmas_lane_f32(__s0_510, -__s1_510, __s2_510, __p3_510); \ __ret_510; \ }) #else #define vfmss_lane_f32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \ float32_t __ret_511; \ float32_t __s0_511 = __p0_511; \ float32_t __s1_511 = __p1_511; \ float32x2_t __s2_511 = __p2_511; \ float32x2_t __rev2_511; __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 1, 0); \ __ret_511 = __noswap_vfmas_lane_f32(__s0_511, -__s1_511, __rev2_511, __p3_511); \ __ret_511; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsq_lane_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \ float64x2_t __ret_512; \ float64x2_t __s0_512 = __p0_512; \ float64x2_t __s1_512 = __p1_512; \ float64x1_t __s2_512 = __p2_512; \ __ret_512 = vfmaq_lane_f64(__s0_512, -__s1_512, __s2_512, __p3_512); \ __ret_512; \ }) #else #define vfmsq_lane_f64(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \ float64x2_t __ret_513; \ float64x2_t __s0_513 = __p0_513; \ float64x2_t __s1_513 = __p1_513; \ float64x1_t __s2_513 = __p2_513; \ float64x2_t __rev0_513; __rev0_513 = __builtin_shufflevector(__s0_513, __s0_513, 1, 0); \ float64x2_t __rev1_513; __rev1_513 = __builtin_shufflevector(__s1_513, __s1_513, 1, 0); \ __ret_513 = __noswap_vfmaq_lane_f64(__rev0_513, -__rev1_513, __s2_513, __p3_513); \ __ret_513 = __builtin_shufflevector(__ret_513, __ret_513, 1, 0); \ __ret_513; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsq_lane_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \ float32x4_t __ret_514; \ float32x4_t __s0_514 = __p0_514; \ float32x4_t __s1_514 = __p1_514; \ float32x2_t __s2_514 = __p2_514; \ __ret_514 = vfmaq_lane_f32(__s0_514, -__s1_514, __s2_514, __p3_514); \ __ret_514; \ }) #else #define vfmsq_lane_f32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \ float32x4_t __ret_515; \ float32x4_t __s0_515 = __p0_515; \ float32x4_t __s1_515 = __p1_515; \ float32x2_t __s2_515 = __p2_515; \ float32x4_t __rev0_515; __rev0_515 = __builtin_shufflevector(__s0_515, __s0_515, 3, 2, 1, 0); \ float32x4_t __rev1_515; __rev1_515 = __builtin_shufflevector(__s1_515, __s1_515, 3, 2, 1, 0); \ float32x2_t __rev2_515; __rev2_515 = __builtin_shufflevector(__s2_515, __s2_515, 1, 0); \ __ret_515 = __noswap_vfmaq_lane_f32(__rev0_515, -__rev1_515, __rev2_515, __p3_515); \ __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 3, 2, 1, 0); \ __ret_515; \ }) #endif #define vfms_lane_f64(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \ float64x1_t __ret_516; \ float64x1_t __s0_516 = __p0_516; \ float64x1_t __s1_516 = __p1_516; \ float64x1_t __s2_516 = __p2_516; \ __ret_516 = vfma_lane_f64(__s0_516, -__s1_516, __s2_516, __p3_516); \ __ret_516; \ }) #ifdef __LITTLE_ENDIAN__ #define vfms_lane_f32(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \ float32x2_t __ret_517; \ float32x2_t __s0_517 = __p0_517; \ float32x2_t __s1_517 = __p1_517; \ float32x2_t __s2_517 = __p2_517; \ __ret_517 = vfma_lane_f32(__s0_517, -__s1_517, __s2_517, __p3_517); \ __ret_517; \ }) #else #define vfms_lane_f32(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \ float32x2_t __ret_518; \ float32x2_t __s0_518 = __p0_518; \ float32x2_t __s1_518 = 
__p1_518; \ float32x2_t __s2_518 = __p2_518; \ float32x2_t __rev0_518; __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 1, 0); \ float32x2_t __rev1_518; __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 1, 0); \ float32x2_t __rev2_518; __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 1, 0); \ __ret_518 = __noswap_vfma_lane_f32(__rev0_518, -__rev1_518, __rev2_518, __p3_518); \ __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 1, 0); \ __ret_518; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsd_laneq_f64(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \ float64_t __ret_519; \ float64_t __s0_519 = __p0_519; \ float64_t __s1_519 = __p1_519; \ float64x2_t __s2_519 = __p2_519; \ __ret_519 = vfmad_laneq_f64(__s0_519, -__s1_519, __s2_519, __p3_519); \ __ret_519; \ }) #else #define vfmsd_laneq_f64(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \ float64_t __ret_520; \ float64_t __s0_520 = __p0_520; \ float64_t __s1_520 = __p1_520; \ float64x2_t __s2_520 = __p2_520; \ float64x2_t __rev2_520; __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 1, 0); \ __ret_520 = __noswap_vfmad_laneq_f64(__s0_520, -__s1_520, __rev2_520, __p3_520); \ __ret_520; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmss_laneq_f32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \ float32_t __ret_521; \ float32_t __s0_521 = __p0_521; \ float32_t __s1_521 = __p1_521; \ float32x4_t __s2_521 = __p2_521; \ __ret_521 = vfmas_laneq_f32(__s0_521, -__s1_521, __s2_521, __p3_521); \ __ret_521; \ }) #else #define vfmss_laneq_f32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \ float32_t __ret_522; \ float32_t __s0_522 = __p0_522; \ float32_t __s1_522 = __p1_522; \ float32x4_t __s2_522 = __p2_522; \ float32x4_t __rev2_522; __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \ __ret_522 = __noswap_vfmas_laneq_f32(__s0_522, -__s1_522, __rev2_522, __p3_522); \ __ret_522; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsq_laneq_f64(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \ float64x2_t __ret_523; \ float64x2_t __s0_523 = __p0_523; \ float64x2_t __s1_523 = __p1_523; \ float64x2_t __s2_523 = __p2_523; \ __ret_523 = vfmaq_laneq_f64(__s0_523, -__s1_523, __s2_523, __p3_523); \ __ret_523; \ }) #else #define vfmsq_laneq_f64(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \ float64x2_t __ret_524; \ float64x2_t __s0_524 = __p0_524; \ float64x2_t __s1_524 = __p1_524; \ float64x2_t __s2_524 = __p2_524; \ float64x2_t __rev0_524; __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 1, 0); \ float64x2_t __rev1_524; __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 1, 0); \ float64x2_t __rev2_524; __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 1, 0); \ __ret_524 = __noswap_vfmaq_laneq_f64(__rev0_524, -__rev1_524, __rev2_524, __p3_524); \ __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 1, 0); \ __ret_524; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmsq_laneq_f32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \ float32x4_t __ret_525; \ float32x4_t __s0_525 = __p0_525; \ float32x4_t __s1_525 = __p1_525; \ float32x4_t __s2_525 = __p2_525; \ __ret_525 = vfmaq_laneq_f32(__s0_525, -__s1_525, __s2_525, __p3_525); \ __ret_525; \ }) #else #define vfmsq_laneq_f32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \ float32x4_t __ret_526; \ float32x4_t __s0_526 = __p0_526; \ float32x4_t __s1_526 = __p1_526; \ float32x4_t __s2_526 = __p2_526; \ float32x4_t __rev0_526; __rev0_526 = 
__builtin_shufflevector(__s0_526, __s0_526, 3, 2, 1, 0); \ float32x4_t __rev1_526; __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 3, 2, 1, 0); \ float32x4_t __rev2_526; __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \ __ret_526 = __noswap_vfmaq_laneq_f32(__rev0_526, -__rev1_526, __rev2_526, __p3_526); \ __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 3, 2, 1, 0); \ __ret_526; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfms_laneq_f64(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \ float64x1_t __ret_527; \ float64x1_t __s0_527 = __p0_527; \ float64x1_t __s1_527 = __p1_527; \ float64x2_t __s2_527 = __p2_527; \ __ret_527 = vfma_laneq_f64(__s0_527, -__s1_527, __s2_527, __p3_527); \ __ret_527; \ }) #else #define vfms_laneq_f64(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \ float64x1_t __ret_528; \ float64x1_t __s0_528 = __p0_528; \ float64x1_t __s1_528 = __p1_528; \ float64x2_t __s2_528 = __p2_528; \ float64x2_t __rev2_528; __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 1, 0); \ __ret_528 = __noswap_vfma_laneq_f64(__s0_528, -__s1_528, __rev2_528, __p3_528); \ __ret_528; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfms_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \ float32x2_t __ret_529; \ float32x2_t __s0_529 = __p0_529; \ float32x2_t __s1_529 = __p1_529; \ float32x4_t __s2_529 = __p2_529; \ __ret_529 = vfma_laneq_f32(__s0_529, -__s1_529, __s2_529, __p3_529); \ __ret_529; \ }) #else #define vfms_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \ float32x2_t __ret_530; \ float32x2_t __s0_530 = __p0_530; \ float32x2_t __s1_530 = __p1_530; \ float32x4_t __s2_530 = __p2_530; \ float32x2_t __rev0_530; __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \ float32x2_t __rev1_530; __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \ float32x4_t __rev2_530; __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \ __ret_530 = __noswap_vfma_laneq_f32(__rev0_530, -__rev1_530, __rev2_530, __p3_530); \ __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \ __ret_530; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); return __ret; } #else __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { float64x1_t __ret; __ret = vfma_f64(__p0, -__p1, 
(float64x1_t) {__p2}); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); return __ret; } #else __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly64x1_t vget_high_p64(poly64x2_t __p0) { poly64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else __ai poly64x1_t vget_high_p64(poly64x2_t __p0) { poly64x1_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1); return __ret; } __ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { poly64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x1_t vget_high_f64(float64x2_t __p0) { float64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else __ai float64x1_t vget_high_f64(float64x2_t __p0) { float64x1_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1); return __ret; } #endif #define vget_lane_p64(__p0, __p1) __extension__ ({ \ poly64_t __ret; \ poly64x1_t __s0 = __p0; \ __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ poly64_t __ret; \ poly64x2_t __s0 = __p0; \ __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ poly64_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \ poly64_t __ret; \ poly64x2_t __s0 = __p0; \ __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ float64_t __ret; \ float64x2_t __s0 = __p0; \ __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ __ret; \ }) #else #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ float64_t __ret; \ float64x2_t __s0 = __p0; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \ __ret; \ }) #define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \ float64_t __ret; \ float64x2_t __s0 = __p0; \ __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ __ret; \ }) #endif #define vget_lane_f64(__p0, __p1) __extension__ ({ \ float64_t __ret; \ float64x1_t __s0 = __p0; \ __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ __ai poly64x1_t vget_low_p64(poly64x2_t __p0) { poly64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else __ai poly64x1_t vget_low_p64(poly64x2_t __p0) { poly64x1_t __ret; poly64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x1_t vget_low_f64(float64x2_t __p0) { float64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else __ai float64x1_t vget_low_f64(float64x2_t __p0) { float64x1_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0); return __ret; } #endif #define vld1_p64(__p0) __extension__ ({ \ poly64x1_t __ret; \ __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1q_p64(__p0) __extension__ ({ \ poly64x2_t __ret; \ __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ __ret; \ }) #else #define vld1q_p64(__p0) __extension__ ({ \ poly64x2_t __ret; \ __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f64(__p0) __extension__ ({ \ float64x2_t __ret; \ __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ __ret; \ }) #else #define vld1q_f64(__p0) __extension__ ({ \ float64x2_t __ret; \ __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_f64(__p0) __extension__ ({ \ float64x1_t __ret; \ __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ __ret; \ }) #define vld1_dup_p64(__p0) __extension__ ({ \ poly64x1_t __ret; \ __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_p64(__p0) __extension__ ({ \ poly64x2_t __ret; \ __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ __ret; \ }) #else #define vld1q_dup_p64(__p0) __extension__ ({ \ poly64x2_t __ret; \ __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_dup_f64(__p0) __extension__ ({ \ float64x2_t __ret; \ __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ __ret; \ }) #else #define vld1q_dup_f64(__p0) __extension__ ({ \ float64x2_t __ret; \ __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_dup_f64(__p0) __extension__ ({ \ float64x1_t __ret; \ __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ __ret; \ }) #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1_t __ret; \ poly64x1_t __s1 = __p1; \ __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s1 = __p1; \ __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ __ret; \ }) #else #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s1 = __p1; \ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2_t __ret; \ float64x2_t __s1 = __p1; \ __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ __ret; \ }) #else #define vld1q_lane_f64(__p0, __p1, __p2) 
__extension__ ({ \ float64x2_t __ret; \ float64x2_t __s1 = __p1; \ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s1 = __p1; \ __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ __ret; \ }) #define vld1_p64_x2(__p0) __extension__ ({ \ poly64x1x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1q_p64_x2(__p0) __extension__ ({ \ poly64x2x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld1q_p64_x2(__p0) __extension__ ({ \ poly64x2x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f64_x2(__p0) __extension__ ({ \ float64x2x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ __ret; \ }) #else #define vld1q_f64_x2(__p0) __extension__ ({ \ float64x2x2_t __ret; \ __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld1_f64_x2(__p0) __extension__ ({ \ float64x1x2_t __ret; \ __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ __ret; \ }) #define vld1_p64_x3(__p0) __extension__ ({ \ poly64x1x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1q_p64_x3(__p0) __extension__ ({ \ poly64x2x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld1q_p64_x3(__p0) __extension__ ({ \ poly64x2x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f64_x3(__p0) __extension__ ({ \ float64x2x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ __ret; \ }) #else #define vld1q_f64_x3(__p0) __extension__ ({ \ float64x2x3_t __ret; \ __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld1_f64_x3(__p0) __extension__ ({ \ float64x1x3_t __ret; \ __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ __ret; \ }) #define vld1_p64_x4(__p0) __extension__ ({ \ poly64x1x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld1q_p64_x4(__p0) __extension__ ({ \ poly64x2x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld1q_p64_x4(__p0) __extension__ ({ \ poly64x2x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], 
__ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld1q_f64_x4(__p0) __extension__ ({ \ float64x2x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ __ret; \ }) #else #define vld1q_f64_x4(__p0) __extension__ ({ \ float64x2x4_t __ret; \ __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld1_f64_x4(__p0) __extension__ ({ \ float64x1x4_t __ret; \ __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ __ret; \ }) #define vld2_p64(__p0) __extension__ ({ \ poly64x1x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld2q_p64(__p0) __extension__ ({ \ poly64x2x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld2q_p64(__p0) __extension__ ({ \ poly64x2x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_u64(__p0) __extension__ ({ \ uint64x2x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 51); \ __ret; \ }) #else #define vld2q_u64(__p0) __extension__ ({ \ uint64x2x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_f64(__p0) __extension__ ({ \ float64x2x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 42); \ __ret; \ }) #else #define vld2q_f64(__p0) __extension__ ({ \ float64x2x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_s64(__p0) __extension__ ({ \ int64x2x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld2q_s64(__p0) __extension__ ({ \ int64x2x2_t __ret; \ __builtin_neon_vld2q_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld2_f64(__p0) __extension__ ({ \ float64x1x2_t __ret; \ __builtin_neon_vld2_v(&__ret, __p0, 10); \ __ret; \ }) #define vld2_dup_p64(__p0) __extension__ ({ \ poly64x1x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_p64(__p0) __extension__ ({ \ poly64x2x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld2q_dup_p64(__p0) __extension__ ({ \ poly64x2x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_dup_f64(__p0) __extension__ ({ \ float64x2x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ __ret; \ }) #else 
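/* Big-endian path: the vld2q/vld2q_dup wrappers in this region reverse the
 * lanes of each 128-bit result with __builtin_shufflevector so that the lane
 * numbering seen by callers matches the little-endian convention used by the
 * rest of this header.  A minimal usage sketch (illustrative only; `buf` is a
 * hypothetical caller-provided array, assuming AArch64 where float64 vectors
 * are available):
 *
 *   double buf[4] = {1.0, 2.0, 3.0, 4.0};
 *   float64x2x2_t deint = vld2q_f64(buf);      // de-interleave: val[0] = {1,3}, val[1] = {2,4}
 *   float64x2x2_t dups  = vld2q_dup_f64(buf);  // replicate:     val[0] = {1,1}, val[1] = {2,2}
 */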
#define vld2q_dup_f64(__p0) __extension__ ({ \ float64x2x2_t __ret; \ __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld2_dup_f64(__p0) __extension__ ({ \ float64x1x2_t __ret; \ __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ __ret; \ }) #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1x2_t __ret; \ poly64x1x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x2_t __ret; \ poly8x16x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ __ret; \ }) #else #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x2_t __ret; \ poly8x16x2_t __s1 = __p1; \ poly8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x2_t __ret; \ poly64x2x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ __ret; \ }) #else #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x2_t __ret; \ poly64x2x2_t __s1 = __p1; \ poly64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x2_t __ret; \ uint8x16x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ __ret; \ }) #else #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x2_t __ret; \ uint8x16x2_t __s1 = __p1; \ uint8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_u64(__p0, __p1, __p2) 
__extension__ ({ \ uint64x2x2_t __ret; \ uint64x2x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ __ret; \ }) #else #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x2_t __ret; \ uint64x2x2_t __s1 = __p1; \ uint64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x2_t __ret; \ int8x16x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ __ret; \ }) #else #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x2_t __ret; \ int8x16x2_t __s1 = __p1; \ int8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x2_t __ret; \ float64x2x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ __ret; \ }) #else #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x2_t __ret; \ float64x2x2_t __s1 = __p1; \ float64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x2_t __ret; \ int64x2x2_t __s1 = __p1; \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ __ret; \ }) #else #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x2_t __ret; \ int64x2x2_t __s1 = __p1; \ int64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1x2_t __ret; \ uint64x1x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ __ret; \ }) #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1x2_t __ret; \ float64x1x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ __ret; \ }) #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1x2_t __ret; \ int64x1x2_t __s1 = __p1; \ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ __ret; \ }) #define vld3_p64(__p0) __extension__ ({ \ poly64x1x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld3q_p64(__p0) __extension__ ({ \ poly64x2x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld3q_p64(__p0) __extension__ ({ \ poly64x2x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_u64(__p0) __extension__ ({ \ uint64x2x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 51); \ __ret; \ }) #else #define vld3q_u64(__p0) __extension__ ({ \ uint64x2x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_f64(__p0) __extension__ ({ \ float64x2x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 42); \ __ret; \ }) #else #define vld3q_f64(__p0) __extension__ ({ \ float64x2x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_s64(__p0) __extension__ ({ \ int64x2x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld3q_s64(__p0) __extension__ ({ \ int64x2x3_t __ret; \ __builtin_neon_vld3q_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld3_f64(__p0) __extension__ ({ \ float64x1x3_t __ret; \ __builtin_neon_vld3_v(&__ret, __p0, 10); \ __ret; \ }) #define vld3_dup_p64(__p0) __extension__ ({ \ poly64x1x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_p64(__p0) __extension__ ({ \ poly64x2x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld3q_dup_p64(__p0) __extension__ ({ \ poly64x2x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_dup_f64(__p0) __extension__ ({ \ float64x2x3_t __ret; \ 
__builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ __ret; \ }) #else #define vld3q_dup_f64(__p0) __extension__ ({ \ float64x2x3_t __ret; \ __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld3_dup_f64(__p0) __extension__ ({ \ float64x1x3_t __ret; \ __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ __ret; \ }) #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1x3_t __ret; \ poly64x1x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x3_t __ret; \ poly8x16x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ __ret; \ }) #else #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x3_t __ret; \ poly8x16x3_t __s1 = __p1; \ poly8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x3_t __ret; \ poly64x2x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ __ret; \ }) #else #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x3_t __ret; \ poly64x2x3_t __s1 = __p1; \ poly64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x3_t __ret; \ uint8x16x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ __ret; \ }) #else #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x3_t __ret; \ uint8x16x3_t __s1 = __p1; \ uint8x16x3_t __rev1; \ __rev1.val[0] 
= __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x3_t __ret; \ uint64x2x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ __ret; \ }) #else #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x3_t __ret; \ uint64x2x3_t __s1 = __p1; \ uint64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x3_t __ret; \ int8x16x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ __ret; \ }) #else #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x3_t __ret; \ int8x16x3_t __s1 = __p1; \ int8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x3_t __ret; \ float64x2x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ __ret; \ }) #else #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ 
float64x2x3_t __ret; \ float64x2x3_t __s1 = __p1; \ float64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x3_t __ret; \ int64x2x3_t __s1 = __p1; \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ __ret; \ }) #else #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x3_t __ret; \ int64x2x3_t __s1 = __p1; \ int64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1x3_t __ret; \ uint64x1x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ __ret; \ }) #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1x3_t __ret; \ float64x1x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ __ret; \ }) #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1x3_t __ret; \ int64x1x3_t __s1 = __p1; \ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ __ret; \ }) #define vld4_p64(__p0) __extension__ ({ \ poly64x1x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld4q_p64(__p0) __extension__ ({ \ poly64x2x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld4q_p64(__p0) __extension__ ({ \ poly64x2x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_u64(__p0) __extension__ ({ \ uint64x2x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 51); \ __ret; \ }) #else #define vld4q_u64(__p0) __extension__ ({ \ uint64x2x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_f64(__p0) __extension__ ({ \ float64x2x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 42); \ __ret; \ }) #else #define vld4q_f64(__p0) __extension__ ({ \ float64x2x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_s64(__p0) __extension__ ({ \ int64x2x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 35); \ __ret; \ }) #else #define vld4q_s64(__p0) __extension__ ({ \ int64x2x4_t __ret; \ __builtin_neon_vld4q_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld4_f64(__p0) __extension__ ({ \ float64x1x4_t __ret; \ __builtin_neon_vld4_v(&__ret, __p0, 10); \ __ret; \ }) #define vld4_dup_p64(__p0) __extension__ ({ \ poly64x1x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_p64(__p0) __extension__ ({ \ poly64x2x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ __ret; \ }) #else #define vld4q_dup_p64(__p0) __extension__ ({ \ poly64x2x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_dup_f64(__p0) __extension__ ({ \ float64x2x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ __ret; \ }) #else #define vld4q_dup_f64(__p0) __extension__ ({ \ float64x2x4_t __ret; \ __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld4_dup_f64(__p0) __extension__ ({ \ float64x1x4_t __ret; \ __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ __ret; \ }) #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1x4_t __ret; \ poly64x1x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x4_t __ret; \ poly8x16x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ __ret; \ }) #else #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ 
poly8x16x4_t __ret; \ poly8x16x4_t __s1 = __p1; \ poly8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x4_t __ret; \ poly64x2x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ __ret; \ }) #else #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x4_t __ret; \ poly64x2x4_t __s1 = __p1; \ poly64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x4_t __ret; \ uint8x16x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ __ret; \ }) #else #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x4_t __ret; \ uint8x16x4_t __s1 = __p1; \ uint8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ \ 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x4_t __ret; \ uint64x2x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ __ret; \ }) #else #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x4_t __ret; \ uint64x2x4_t __s1 = __p1; \ uint64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x4_t __ret; \ int8x16x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ __ret; \ }) #else #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x4_t __ret; \ int8x16x4_t __s1 = __p1; \ int8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x4_t __ret; \ float64x2x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ __ret; \ }) #else #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x4_t __ret; \ float64x2x4_t __s1 = __p1; \ float64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x4_t __ret; \ int64x2x4_t __s1 = __p1; \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ __ret; \ }) #else #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x4_t __ret; \ int64x2x4_t __s1 = __p1; \ int64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1x4_t __ret; \ uint64x1x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ __ret; \ }) #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1x4_t __ret; \ float64x1x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ __ret; \ }) #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1x4_t __ret; \ int64x1x4_t __s1 = __p1; \ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ __ret; \ }) #define vldrq_p128(__p0) __extension__ ({ \ poly128_t __ret; \ __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64_t vmaxnmvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0); return __ret; } #else __ai float64_t vmaxnmvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vmaxnmvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0); return __ret; } #else __ai float32_t vmaxnmvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vmaxnmv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0); return __ret; } #else __ai float32_t vmaxnmv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8_t vmaxvq_u8(uint8x16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0); return __ret; } #else __ai uint8_t vmaxvq_u8(uint8x16_t __p0) { uint8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32_t vmaxvq_u32(uint32x4_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0); return __ret; } #else __ai uint32_t vmaxvq_u32(uint32x4_t __p0) { uint32_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16_t vmaxvq_u16(uint16x8_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0); return __ret; } #else __ai uint16_t vmaxvq_u16(uint16x8_t __p0) { uint16_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8_t vmaxvq_s8(int8x16_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0); return __ret; } #else __ai int8_t vmaxvq_s8(int8x16_t __p0) { int8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64_t vmaxvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0); return __ret; } #else __ai float64_t vmaxvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t 
vmaxvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0); return __ret; } #else __ai float32_t vmaxvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32_t vmaxvq_s32(int32x4_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0); return __ret; } #else __ai int32_t vmaxvq_s32(int32x4_t __p0) { int32_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16_t vmaxvq_s16(int16x8_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0); return __ret; } #else __ai int16_t vmaxvq_s16(int16x8_t __p0) { int16_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8_t vmaxv_u8(uint8x8_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0); return __ret; } #else __ai uint8_t vmaxv_u8(uint8x8_t __p0) { uint8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32_t vmaxv_u32(uint32x2_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0); return __ret; } #else __ai uint32_t vmaxv_u32(uint32x2_t __p0) { uint32_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16_t vmaxv_u16(uint16x4_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0); return __ret; } #else __ai uint16_t vmaxv_u16(uint16x4_t __p0) { uint16_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8_t vmaxv_s8(int8x8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0); return __ret; } #else __ai int8_t vmaxv_s8(int8x8_t __p0) { int8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vmaxv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0); return __ret; } #else __ai float32_t vmaxv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32_t vmaxv_s32(int32x2_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0); return __ret; } #else __ai int32_t vmaxv_s32(int32x2_t __p0) { int32_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16_t vmaxv_s16(int16x4_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0); return __ret; } #else __ai int16_t vmaxv_s16(int16x4_t __p0) { int16_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); 
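/* The input is lane-reversed to the builtin's little-endian view before the horizontal maximum is taken. */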
__ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64_t vminnmvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0); return __ret; } #else __ai float64_t vminnmvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vminnmvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0); return __ret; } #else __ai float32_t vminnmvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vminnmv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0); return __ret; } #else __ai float32_t vminnmv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8_t vminvq_u8(uint8x16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0); return __ret; } #else __ai uint8_t vminvq_u8(uint8x16_t __p0) { uint8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32_t vminvq_u32(uint32x4_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0); return __ret; } #else __ai uint32_t vminvq_u32(uint32x4_t __p0) { uint32_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16_t vminvq_u16(uint16x8_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0); return __ret; } #else __ai uint16_t vminvq_u16(uint16x8_t __p0) { uint16_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8_t vminvq_s8(int8x16_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vminvq_s8(__p0); return __ret; } #else __ai int8_t vminvq_s8(int8x16_t __p0) { int8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0); 
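/* A horizontal reduction yields the same scalar for any lane order, so the result itself needs no swap. */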
return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64_t vminvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vminvq_f64(__p0); return __ret; } #else __ai float64_t vminvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vminvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vminvq_f32(__p0); return __ret; } #else __ai float32_t vminvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32_t vminvq_s32(int32x4_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vminvq_s32(__p0); return __ret; } #else __ai int32_t vminvq_s32(int32x4_t __p0) { int32_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16_t vminvq_s16(int16x8_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vminvq_s16(__p0); return __ret; } #else __ai int16_t vminvq_s16(int16x8_t __p0) { int16_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8_t vminv_u8(uint8x8_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vminv_u8(__p0); return __ret; } #else __ai uint8_t vminv_u8(uint8x8_t __p0) { uint8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32_t vminv_u32(uint32x2_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vminv_u32(__p0); return __ret; } #else __ai uint32_t vminv_u32(uint32x2_t __p0) { uint32_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16_t vminv_u16(uint16x4_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vminv_u16(__p0); return __ret; } #else __ai uint16_t vminv_u16(uint16x4_t __p0) { uint16_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8_t vminv_s8(int8x8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vminv_s8(__p0); return __ret; } #else __ai int8_t vminv_s8(int8x8_t __p0) { int8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vminv_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vminv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vminv_f32(__p0); return __ret; } #else __ai float32_t vminv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vminv_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32_t vminv_s32(int32x2_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vminv_s32(__p0); return __ret; } #else __ai int32_t vminv_s32(int32x2_t __p0) { int32_t 
__ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32_t) __builtin_neon_vminv_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16_t vminv_s16(int16x4_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vminv_s16(__p0); return __ret; } #else __ai int16_t vminv_s16(int16x4_t __p0) { int16_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vminv_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #ifdef __LITTLE_ENDIAN__ #define vmlaq_laneq_u32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \ uint32x4_t __ret_531; \ uint32x4_t __s0_531 = __p0_531; \ uint32x4_t __s1_531 = __p1_531; \ uint32x4_t __s2_531 = __p2_531; \ __ret_531 = __s0_531 + __s1_531 * splatq_laneq_u32(__s2_531, __p3_531); \ __ret_531; \ }) #else #define vmlaq_laneq_u32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \ uint32x4_t __ret_532; \ uint32x4_t __s0_532 = __p0_532; \ uint32x4_t __s1_532 = __p1_532; \ uint32x4_t __s2_532 = __p2_532; \ uint32x4_t __rev0_532; __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 3, 2, 1, 0); \ uint32x4_t __rev1_532; __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 3, 2, 1, 0); \ uint32x4_t __rev2_532; __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \ __ret_532 = __rev0_532 + __rev1_532 * __noswap_splatq_laneq_u32(__rev2_532, __p3_532); \ __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 3, 2, 1, 0); \ __ret_532; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_laneq_u16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \ uint16x8_t __ret_533; \ uint16x8_t __s0_533 = __p0_533; \ uint16x8_t __s1_533 = __p1_533; \ uint16x8_t __s2_533 = __p2_533; \ __ret_533 = __s0_533 + __s1_533 * splatq_laneq_u16(__s2_533, __p3_533); \ __ret_533; \ }) #else #define vmlaq_laneq_u16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \ uint16x8_t __ret_534; \ uint16x8_t __s0_534 = __p0_534; \ uint16x8_t __s1_534 = __p1_534; \ uint16x8_t __s2_534 = __p2_534; \ uint16x8_t __rev0_534; __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_534; __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev2_534; __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_534 = __rev0_534 + __rev1_534 * __noswap_splatq_laneq_u16(__rev2_534, __p3_534); \ __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_534; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_laneq_f32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \ float32x4_t __ret_535; \ float32x4_t __s0_535 = __p0_535; \ float32x4_t __s1_535 = __p1_535; \ float32x4_t __s2_535 = 
__p2_535; \ __ret_535 = __s0_535 + __s1_535 * splatq_laneq_f32(__s2_535, __p3_535); \ __ret_535; \ }) #else #define vmlaq_laneq_f32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \ float32x4_t __ret_536; \ float32x4_t __s0_536 = __p0_536; \ float32x4_t __s1_536 = __p1_536; \ float32x4_t __s2_536 = __p2_536; \ float32x4_t __rev0_536; __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 3, 2, 1, 0); \ float32x4_t __rev1_536; __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \ float32x4_t __rev2_536; __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 3, 2, 1, 0); \ __ret_536 = __rev0_536 + __rev1_536 * __noswap_splatq_laneq_f32(__rev2_536, __p3_536); \ __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 3, 2, 1, 0); \ __ret_536; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_laneq_s32(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \ int32x4_t __ret_537; \ int32x4_t __s0_537 = __p0_537; \ int32x4_t __s1_537 = __p1_537; \ int32x4_t __s2_537 = __p2_537; \ __ret_537 = __s0_537 + __s1_537 * splatq_laneq_s32(__s2_537, __p3_537); \ __ret_537; \ }) #else #define vmlaq_laneq_s32(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \ int32x4_t __ret_538; \ int32x4_t __s0_538 = __p0_538; \ int32x4_t __s1_538 = __p1_538; \ int32x4_t __s2_538 = __p2_538; \ int32x4_t __rev0_538; __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \ int32x4_t __rev1_538; __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 3, 2, 1, 0); \ int32x4_t __rev2_538; __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \ __ret_538 = __rev0_538 + __rev1_538 * __noswap_splatq_laneq_s32(__rev2_538, __p3_538); \ __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \ __ret_538; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlaq_laneq_s16(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \ int16x8_t __ret_539; \ int16x8_t __s0_539 = __p0_539; \ int16x8_t __s1_539 = __p1_539; \ int16x8_t __s2_539 = __p2_539; \ __ret_539 = __s0_539 + __s1_539 * splatq_laneq_s16(__s2_539, __p3_539); \ __ret_539; \ }) #else #define vmlaq_laneq_s16(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \ int16x8_t __ret_540; \ int16x8_t __s0_540 = __p0_540; \ int16x8_t __s1_540 = __p1_540; \ int16x8_t __s2_540 = __p2_540; \ int16x8_t __rev0_540; __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_540; __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_540; __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_540 = __rev0_540 + __rev1_540 * __noswap_splatq_laneq_s16(__rev2_540, __p3_540); \ __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_540; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_laneq_u32(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \ uint32x2_t __ret_541; \ uint32x2_t __s0_541 = __p0_541; \ uint32x2_t __s1_541 = __p1_541; \ uint32x4_t __s2_541 = __p2_541; \ __ret_541 = __s0_541 + __s1_541 * splat_laneq_u32(__s2_541, __p3_541); \ __ret_541; \ }) #else #define vmla_laneq_u32(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \ uint32x2_t __ret_542; \ uint32x2_t __s0_542 = __p0_542; \ uint32x2_t __s1_542 = __p1_542; \ uint32x4_t __s2_542 = __p2_542; \ uint32x2_t __rev0_542; __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 1, 0); \ uint32x2_t __rev1_542; __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 1, 0); \ 
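/* The __noswap_* splat helpers expect operands already in the builtin's lane order and skip the usual endian swaps. */ \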
uint32x4_t __rev2_542; __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \ __ret_542 = __rev0_542 + __rev1_542 * __noswap_splat_laneq_u32(__rev2_542, __p3_542); \ __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 1, 0); \ __ret_542; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_laneq_u16(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \ uint16x4_t __ret_543; \ uint16x4_t __s0_543 = __p0_543; \ uint16x4_t __s1_543 = __p1_543; \ uint16x8_t __s2_543 = __p2_543; \ __ret_543 = __s0_543 + __s1_543 * splat_laneq_u16(__s2_543, __p3_543); \ __ret_543; \ }) #else #define vmla_laneq_u16(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \ uint16x4_t __ret_544; \ uint16x4_t __s0_544 = __p0_544; \ uint16x4_t __s1_544 = __p1_544; \ uint16x8_t __s2_544 = __p2_544; \ uint16x4_t __rev0_544; __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 3, 2, 1, 0); \ uint16x4_t __rev1_544; __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \ uint16x8_t __rev2_544; __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_544 = __rev0_544 + __rev1_544 * __noswap_splat_laneq_u16(__rev2_544, __p3_544); \ __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 3, 2, 1, 0); \ __ret_544; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_laneq_f32(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \ float32x2_t __ret_545; \ float32x2_t __s0_545 = __p0_545; \ float32x2_t __s1_545 = __p1_545; \ float32x4_t __s2_545 = __p2_545; \ __ret_545 = __s0_545 + __s1_545 * splat_laneq_f32(__s2_545, __p3_545); \ __ret_545; \ }) #else #define vmla_laneq_f32(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \ float32x2_t __ret_546; \ float32x2_t __s0_546 = __p0_546; \ float32x2_t __s1_546 = __p1_546; \ float32x4_t __s2_546 = __p2_546; \ float32x2_t __rev0_546; __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 1, 0); \ float32x2_t __rev1_546; __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 1, 0); \ float32x4_t __rev2_546; __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 3, 2, 1, 0); \ __ret_546 = __rev0_546 + __rev1_546 * __noswap_splat_laneq_f32(__rev2_546, __p3_546); \ __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 1, 0); \ __ret_546; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \ int32x2_t __ret_547; \ int32x2_t __s0_547 = __p0_547; \ int32x2_t __s1_547 = __p1_547; \ int32x4_t __s2_547 = __p2_547; \ __ret_547 = __s0_547 + __s1_547 * splat_laneq_s32(__s2_547, __p3_547); \ __ret_547; \ }) #else #define vmla_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \ int32x2_t __ret_548; \ int32x2_t __s0_548 = __p0_548; \ int32x2_t __s1_548 = __p1_548; \ int32x4_t __s2_548 = __p2_548; \ int32x2_t __rev0_548; __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \ int32x2_t __rev1_548; __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 1, 0); \ int32x4_t __rev2_548; __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \ __ret_548 = __rev0_548 + __rev1_548 * __noswap_splat_laneq_s32(__rev2_548, __p3_548); \ __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \ __ret_548; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmla_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \ int16x4_t __ret_549; \ int16x4_t __s0_549 = __p0_549; \ int16x4_t __s1_549 = __p1_549; \ int16x8_t __s2_549 = __p2_549; \ __ret_549 = __s0_549 + __s1_549 * splat_laneq_s16(__s2_549, 
__p3_549); \ __ret_549; \ }) #else #define vmla_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \ int16x4_t __ret_550; \ int16x4_t __s0_550 = __p0_550; \ int16x4_t __s1_550 = __p1_550; \ int16x8_t __s2_550 = __p2_550; \ int16x4_t __rev0_550; __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \ int16x4_t __rev1_550; __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 3, 2, 1, 0); \ int16x8_t __rev2_550; __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_550 = __rev0_550 + __rev1_550 * __noswap_splat_laneq_s16(__rev2_550, __p3_550); \ __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \ __ret_550; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_high_lane_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \ uint64x2_t __ret_551; \ uint64x2_t __s0_551 = __p0_551; \ uint32x4_t __s1_551 = __p1_551; \ uint32x2_t __s2_551 = __p2_551; \ __ret_551 = __s0_551 + vmull_u32(vget_high_u32(__s1_551), splat_lane_u32(__s2_551, __p3_551)); \ __ret_551; \ }) #else #define vmlal_high_lane_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \ uint64x2_t __ret_552; \ uint64x2_t __s0_552 = __p0_552; \ uint32x4_t __s1_552 = __p1_552; \ uint32x2_t __s2_552 = __p2_552; \ uint64x2_t __rev0_552; __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \ uint32x4_t __rev1_552; __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 3, 2, 1, 0); \ uint32x2_t __rev2_552; __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 1, 0); \ __ret_552 = __rev0_552 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_552), __noswap_splat_lane_u32(__rev2_552, __p3_552)); \ __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \ __ret_552; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_high_lane_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \ uint32x4_t __ret_553; \ uint32x4_t __s0_553 = __p0_553; \ uint16x8_t __s1_553 = __p1_553; \ uint16x4_t __s2_553 = __p2_553; \ __ret_553 = __s0_553 + vmull_u16(vget_high_u16(__s1_553), splat_lane_u16(__s2_553, __p3_553)); \ __ret_553; \ }) #else #define vmlal_high_lane_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \ uint32x4_t __ret_554; \ uint32x4_t __s0_554 = __p0_554; \ uint16x8_t __s1_554 = __p1_554; \ uint16x4_t __s2_554 = __p2_554; \ uint32x4_t __rev0_554; __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \ uint16x8_t __rev1_554; __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x4_t __rev2_554; __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 3, 2, 1, 0); \ __ret_554 = __rev0_554 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_554), __noswap_splat_lane_u16(__rev2_554, __p3_554)); \ __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \ __ret_554; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_high_lane_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \ int64x2_t __ret_555; \ int64x2_t __s0_555 = __p0_555; \ int32x4_t __s1_555 = __p1_555; \ int32x2_t __s2_555 = __p2_555; \ __ret_555 = __s0_555 + vmull_s32(vget_high_s32(__s1_555), splat_lane_s32(__s2_555, __p3_555)); \ __ret_555; \ }) #else #define vmlal_high_lane_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \ int64x2_t __ret_556; \ int64x2_t __s0_556 = __p0_556; \ int32x4_t __s1_556 = __p1_556; \ int32x2_t __s2_556 = __p2_556; \ int64x2_t __rev0_556; __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \ int32x4_t __rev1_556; 
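/* vmlal_high_lane: widen-multiply the high half of the second operand by the selected lane and accumulate into the first. */ \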
__rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 3, 2, 1, 0); \ int32x2_t __rev2_556; __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 1, 0); \ __ret_556 = __rev0_556 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_556), __noswap_splat_lane_s32(__rev2_556, __p3_556)); \ __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \ __ret_556; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_high_lane_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \ int32x4_t __ret_557; \ int32x4_t __s0_557 = __p0_557; \ int16x8_t __s1_557 = __p1_557; \ int16x4_t __s2_557 = __p2_557; \ __ret_557 = __s0_557 + vmull_s16(vget_high_s16(__s1_557), splat_lane_s16(__s2_557, __p3_557)); \ __ret_557; \ }) #else #define vmlal_high_lane_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \ int32x4_t __ret_558; \ int32x4_t __s0_558 = __p0_558; \ int16x8_t __s1_558 = __p1_558; \ int16x4_t __s2_558 = __p2_558; \ int32x4_t __rev0_558; __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \ int16x8_t __rev1_558; __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_558; __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 3, 2, 1, 0); \ __ret_558 = __rev0_558 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_558), __noswap_splat_lane_s16(__rev2_558, __p3_558)); \ __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \ __ret_558; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_high_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \ uint64x2_t __ret_559; \ uint64x2_t __s0_559 = __p0_559; \ uint32x4_t __s1_559 = __p1_559; \ uint32x4_t __s2_559 = __p2_559; \ __ret_559 = __s0_559 + vmull_u32(vget_high_u32(__s1_559), splat_laneq_u32(__s2_559, __p3_559)); \ __ret_559; \ }) #else #define vmlal_high_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \ uint64x2_t __ret_560; \ uint64x2_t __s0_560 = __p0_560; \ uint32x4_t __s1_560 = __p1_560; \ uint32x4_t __s2_560 = __p2_560; \ uint64x2_t __rev0_560; __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 1, 0); \ uint32x4_t __rev1_560; __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \ uint32x4_t __rev2_560; __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \ __ret_560 = __rev0_560 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_560), __noswap_splat_laneq_u32(__rev2_560, __p3_560)); \ __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 1, 0); \ __ret_560; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_high_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \ uint32x4_t __ret_561; \ uint32x4_t __s0_561 = __p0_561; \ uint16x8_t __s1_561 = __p1_561; \ uint16x8_t __s2_561 = __p2_561; \ __ret_561 = __s0_561 + vmull_u16(vget_high_u16(__s1_561), splat_laneq_u16(__s2_561, __p3_561)); \ __ret_561; \ }) #else #define vmlal_high_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \ uint32x4_t __ret_562; \ uint32x4_t __s0_562 = __p0_562; \ uint16x8_t __s1_562 = __p1_562; \ uint16x8_t __s2_562 = __p2_562; \ uint32x4_t __rev0_562; __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 3, 2, 1, 0); \ uint16x8_t __rev1_562; __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev2_562; __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_562 = __rev0_562 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_562), __noswap_splat_laneq_u16(__rev2_562, __p3_562)); \ 
__ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 3, 2, 1, 0); \ __ret_562; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_high_laneq_s32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \ int64x2_t __ret_563; \ int64x2_t __s0_563 = __p0_563; \ int32x4_t __s1_563 = __p1_563; \ int32x4_t __s2_563 = __p2_563; \ __ret_563 = __s0_563 + vmull_s32(vget_high_s32(__s1_563), splat_laneq_s32(__s2_563, __p3_563)); \ __ret_563; \ }) #else #define vmlal_high_laneq_s32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \ int64x2_t __ret_564; \ int64x2_t __s0_564 = __p0_564; \ int32x4_t __s1_564 = __p1_564; \ int32x4_t __s2_564 = __p2_564; \ int64x2_t __rev0_564; __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 1, 0); \ int32x4_t __rev1_564; __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \ int32x4_t __rev2_564; __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \ __ret_564 = __rev0_564 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_564), __noswap_splat_laneq_s32(__rev2_564, __p3_564)); \ __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 1, 0); \ __ret_564; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_high_laneq_s16(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \ int32x4_t __ret_565; \ int32x4_t __s0_565 = __p0_565; \ int16x8_t __s1_565 = __p1_565; \ int16x8_t __s2_565 = __p2_565; \ __ret_565 = __s0_565 + vmull_s16(vget_high_s16(__s1_565), splat_laneq_s16(__s2_565, __p3_565)); \ __ret_565; \ }) #else #define vmlal_high_laneq_s16(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \ int32x4_t __ret_566; \ int32x4_t __s0_566 = __p0_566; \ int16x8_t __s1_566 = __p1_566; \ int16x8_t __s2_566 = __p2_566; \ int32x4_t __rev0_566; __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \ int16x8_t __rev1_566; __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_566; __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_566 = __rev0_566 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_566), __noswap_splat_laneq_s16(__rev2_566, __p3_566)); \ __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \ __ret_566; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_laneq_u32(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \ uint64x2_t __ret_567; \ uint64x2_t __s0_567 = __p0_567; \ uint32x2_t __s1_567 = __p1_567; \ uint32x4_t __s2_567 = __p2_567; \ __ret_567 = __s0_567 + vmull_u32(__s1_567, splat_laneq_u32(__s2_567, __p3_567)); \ __ret_567; \ }) #else #define vmlal_laneq_u32(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \ uint64x2_t __ret_568; \ uint64x2_t __s0_568 = __p0_568; \ uint32x2_t __s1_568 = __p1_568; \ uint32x4_t __s2_568 = __p2_568; \ uint64x2_t __rev0_568; __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 1, 0); \ uint32x2_t __rev1_568; __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \ uint32x4_t __rev2_568; __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 3, 2, 1, 0); \ __ret_568 = __rev0_568 + __noswap_vmull_u32(__rev1_568, __noswap_splat_laneq_u32(__rev2_568, __p3_568)); \ __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \ __ret_568; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_laneq_u16(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \ uint32x4_t __ret_569; \ uint32x4_t __s0_569 = __p0_569; \ uint16x4_t __s1_569 = __p1_569; \ uint16x8_t __s2_569 = __p2_569; \ __ret_569 = __s0_569 + vmull_u16(__s1_569, 
splat_laneq_u16(__s2_569, __p3_569)); \ __ret_569; \ }) #else #define vmlal_laneq_u16(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \ uint32x4_t __ret_570; \ uint32x4_t __s0_570 = __p0_570; \ uint16x4_t __s1_570 = __p1_570; \ uint16x8_t __s2_570 = __p2_570; \ uint32x4_t __rev0_570; __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \ uint16x4_t __rev1_570; __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \ uint16x8_t __rev2_570; __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_570 = __rev0_570 + __noswap_vmull_u16(__rev1_570, __noswap_splat_laneq_u16(__rev2_570, __p3_570)); \ __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \ __ret_570; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_laneq_s32(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \ int64x2_t __ret_571; \ int64x2_t __s0_571 = __p0_571; \ int32x2_t __s1_571 = __p1_571; \ int32x4_t __s2_571 = __p2_571; \ __ret_571 = __s0_571 + vmull_s32(__s1_571, splat_laneq_s32(__s2_571, __p3_571)); \ __ret_571; \ }) #else #define vmlal_laneq_s32(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \ int64x2_t __ret_572; \ int64x2_t __s0_572 = __p0_572; \ int32x2_t __s1_572 = __p1_572; \ int32x4_t __s2_572 = __p2_572; \ int64x2_t __rev0_572; __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 1, 0); \ int32x2_t __rev1_572; __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 1, 0); \ int32x4_t __rev2_572; __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 3, 2, 1, 0); \ __ret_572 = __rev0_572 + __noswap_vmull_s32(__rev1_572, __noswap_splat_laneq_s32(__rev2_572, __p3_572)); \ __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 1, 0); \ __ret_572; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_laneq_s16(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \ int32x4_t __ret_573; \ int32x4_t __s0_573 = __p0_573; \ int16x4_t __s1_573 = __p1_573; \ int16x8_t __s2_573 = __p2_573; \ __ret_573 = __s0_573 + vmull_s16(__s1_573, splat_laneq_s16(__s2_573, __p3_573)); \ __ret_573; \ }) #else #define vmlal_laneq_s16(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \ int32x4_t __ret_574; \ int32x4_t __s0_574 = __p0_574; \ int16x4_t __s1_574 = __p1_574; \ int16x8_t __s2_574 = __p2_574; \ int32x4_t __rev0_574; __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 3, 2, 1, 0); \ int16x4_t __rev1_574; __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 3, 2, 1, 0); \ int16x8_t __rev2_574; __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_574 = __rev0_574 + __noswap_vmull_s16(__rev1_574, __noswap_splat_laneq_s16(__rev2_574, __p3_574)); \ __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 3, 2, 1, 0); \ __ret_574; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = __p0 - __p1 * __p2; 
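/* float64x1_t has a single lane, so this multiply-subtract needs no big-endian handling and only one definition. */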
return __ret; } #ifdef __LITTLE_ENDIAN__ #define vmlsq_laneq_u32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \ uint32x4_t __ret_575; \ uint32x4_t __s0_575 = __p0_575; \ uint32x4_t __s1_575 = __p1_575; \ uint32x4_t __s2_575 = __p2_575; \ __ret_575 = __s0_575 - __s1_575 * splatq_laneq_u32(__s2_575, __p3_575); \ __ret_575; \ }) #else #define vmlsq_laneq_u32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \ uint32x4_t __ret_576; \ uint32x4_t __s0_576 = __p0_576; \ uint32x4_t __s1_576 = __p1_576; \ uint32x4_t __s2_576 = __p2_576; \ uint32x4_t __rev0_576; __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 3, 2, 1, 0); \ uint32x4_t __rev1_576; __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \ uint32x4_t __rev2_576; __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \ __ret_576 = __rev0_576 - __rev1_576 * __noswap_splatq_laneq_u32(__rev2_576, __p3_576); \ __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 3, 2, 1, 0); \ __ret_576; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_laneq_u16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \ uint16x8_t __ret_577; \ uint16x8_t __s0_577 = __p0_577; \ uint16x8_t __s1_577 = __p1_577; \ uint16x8_t __s2_577 = __p2_577; \ __ret_577 = __s0_577 - __s1_577 * splatq_laneq_u16(__s2_577, __p3_577); \ __ret_577; \ }) #else #define vmlsq_laneq_u16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \ uint16x8_t __ret_578; \ uint16x8_t __s0_578 = __p0_578; \ uint16x8_t __s1_578 = __p1_578; \ uint16x8_t __s2_578 = __p2_578; \ uint16x8_t __rev0_578; __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_578; __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev2_578; __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_578 = __rev0_578 - __rev1_578 * __noswap_splatq_laneq_u16(__rev2_578, __p3_578); \ __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_578; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_laneq_f32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \ float32x4_t __ret_579; \ float32x4_t __s0_579 = __p0_579; \ float32x4_t __s1_579 = __p1_579; \ float32x4_t __s2_579 = __p2_579; \ __ret_579 = __s0_579 - __s1_579 * splatq_laneq_f32(__s2_579, __p3_579); \ __ret_579; \ }) #else #define vmlsq_laneq_f32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \ float32x4_t __ret_580; \ float32x4_t __s0_580 = __p0_580; \ float32x4_t __s1_580 = __p1_580; \ float32x4_t __s2_580 = __p2_580; \ float32x4_t __rev0_580; __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 3, 2, 1, 0); \ float32x4_t __rev1_580; __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \ float32x4_t __rev2_580; __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 3, 2, 1, 0); \ __ret_580 = __rev0_580 - __rev1_580 * __noswap_splatq_laneq_f32(__rev2_580, __p3_580); \ __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 3, 2, 1, 0); \ __ret_580; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_laneq_s32(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \ int32x4_t __ret_581; \ int32x4_t __s0_581 = __p0_581; \ int32x4_t __s1_581 = __p1_581; \ int32x4_t __s2_581 = __p2_581; \ __ret_581 = __s0_581 - __s1_581 * splatq_laneq_s32(__s2_581, __p3_581); \ __ret_581; \ }) #else #define vmlsq_laneq_s32(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \ int32x4_t __ret_582; \ int32x4_t __s0_582 = 
__p0_582; \ int32x4_t __s1_582 = __p1_582; \ int32x4_t __s2_582 = __p2_582; \ int32x4_t __rev0_582; __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \ int32x4_t __rev1_582; __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 3, 2, 1, 0); \ int32x4_t __rev2_582; __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \ __ret_582 = __rev0_582 - __rev1_582 * __noswap_splatq_laneq_s32(__rev2_582, __p3_582); \ __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \ __ret_582; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsq_laneq_s16(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \ int16x8_t __ret_583; \ int16x8_t __s0_583 = __p0_583; \ int16x8_t __s1_583 = __p1_583; \ int16x8_t __s2_583 = __p2_583; \ __ret_583 = __s0_583 - __s1_583 * splatq_laneq_s16(__s2_583, __p3_583); \ __ret_583; \ }) #else #define vmlsq_laneq_s16(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \ int16x8_t __ret_584; \ int16x8_t __s0_584 = __p0_584; \ int16x8_t __s1_584 = __p1_584; \ int16x8_t __s2_584 = __p2_584; \ int16x8_t __rev0_584; __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_584; __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_584; __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_584 = __rev0_584 - __rev1_584 * __noswap_splatq_laneq_s16(__rev2_584, __p3_584); \ __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_584; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_laneq_u32(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \ uint32x2_t __ret_585; \ uint32x2_t __s0_585 = __p0_585; \ uint32x2_t __s1_585 = __p1_585; \ uint32x4_t __s2_585 = __p2_585; \ __ret_585 = __s0_585 - __s1_585 * splat_laneq_u32(__s2_585, __p3_585); \ __ret_585; \ }) #else #define vmls_laneq_u32(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \ uint32x2_t __ret_586; \ uint32x2_t __s0_586 = __p0_586; \ uint32x2_t __s1_586 = __p1_586; \ uint32x4_t __s2_586 = __p2_586; \ uint32x2_t __rev0_586; __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 1, 0); \ uint32x2_t __rev1_586; __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \ uint32x4_t __rev2_586; __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \ __ret_586 = __rev0_586 - __rev1_586 * __noswap_splat_laneq_u32(__rev2_586, __p3_586); \ __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \ __ret_586; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_laneq_u16(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \ uint16x4_t __ret_587; \ uint16x4_t __s0_587 = __p0_587; \ uint16x4_t __s1_587 = __p1_587; \ uint16x8_t __s2_587 = __p2_587; \ __ret_587 = __s0_587 - __s1_587 * splat_laneq_u16(__s2_587, __p3_587); \ __ret_587; \ }) #else #define vmls_laneq_u16(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \ uint16x4_t __ret_588; \ uint16x4_t __s0_588 = __p0_588; \ uint16x4_t __s1_588 = __p1_588; \ uint16x8_t __s2_588 = __p2_588; \ uint16x4_t __rev0_588; __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 3, 2, 1, 0); \ uint16x4_t __rev1_588; __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \ uint16x8_t __rev2_588; __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_588 = __rev0_588 - __rev1_588 * __noswap_splat_laneq_u16(__rev2_588, __p3_588); \ __ret_588 = __builtin_shufflevector(__ret_588, 
__ret_588, 3, 2, 1, 0); \ __ret_588; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_laneq_f32(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \ float32x2_t __ret_589; \ float32x2_t __s0_589 = __p0_589; \ float32x2_t __s1_589 = __p1_589; \ float32x4_t __s2_589 = __p2_589; \ __ret_589 = __s0_589 - __s1_589 * splat_laneq_f32(__s2_589, __p3_589); \ __ret_589; \ }) #else #define vmls_laneq_f32(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \ float32x2_t __ret_590; \ float32x2_t __s0_590 = __p0_590; \ float32x2_t __s1_590 = __p1_590; \ float32x4_t __s2_590 = __p2_590; \ float32x2_t __rev0_590; __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 1, 0); \ float32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ float32x4_t __rev2_590; __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 3, 2, 1, 0); \ __ret_590 = __rev0_590 - __rev1_590 * __noswap_splat_laneq_f32(__rev2_590, __p3_590); \ __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 1, 0); \ __ret_590; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \ int32x2_t __ret_591; \ int32x2_t __s0_591 = __p0_591; \ int32x2_t __s1_591 = __p1_591; \ int32x4_t __s2_591 = __p2_591; \ __ret_591 = __s0_591 - __s1_591 * splat_laneq_s32(__s2_591, __p3_591); \ __ret_591; \ }) #else #define vmls_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \ int32x2_t __ret_592; \ int32x2_t __s0_592 = __p0_592; \ int32x2_t __s1_592 = __p1_592; \ int32x4_t __s2_592 = __p2_592; \ int32x2_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \ int32x2_t __rev1_592; __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 1, 0); \ int32x4_t __rev2_592; __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \ __ret_592 = __rev0_592 - __rev1_592 * __noswap_splat_laneq_s32(__rev2_592, __p3_592); \ __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \ __ret_592; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmls_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \ int16x4_t __ret_593; \ int16x4_t __s0_593 = __p0_593; \ int16x4_t __s1_593 = __p1_593; \ int16x8_t __s2_593 = __p2_593; \ __ret_593 = __s0_593 - __s1_593 * splat_laneq_s16(__s2_593, __p3_593); \ __ret_593; \ }) #else #define vmls_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \ int16x4_t __ret_594; \ int16x4_t __s0_594 = __p0_594; \ int16x4_t __s1_594 = __p1_594; \ int16x8_t __s2_594 = __p2_594; \ int16x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ int16x4_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 3, 2, 1, 0); \ int16x8_t __rev2_594; __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_594 = __rev0_594 - __rev1_594 * __noswap_splat_laneq_s16(__rev2_594, __p3_594); \ __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \ __ret_594; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_high_lane_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \ uint64x2_t __ret_595; \ uint64x2_t __s0_595 = __p0_595; \ uint32x4_t __s1_595 = __p1_595; \ uint32x2_t __s2_595 = __p2_595; \ __ret_595 = __s0_595 - vmull_u32(vget_high_u32(__s1_595), splat_lane_u32(__s2_595, __p3_595)); \ __ret_595; \ }) #else #define vmlsl_high_lane_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \ uint64x2_t __ret_596; \ uint64x2_t __s0_596 = __p0_596; \ uint32x4_t __s1_596 = 
__p1_596; \ uint32x2_t __s2_596 = __p2_596; \ uint64x2_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \ uint32x4_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \ uint32x2_t __rev2_596; __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 1, 0); \ __ret_596 = __rev0_596 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_596), __noswap_splat_lane_u32(__rev2_596, __p3_596)); \ __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \ __ret_596; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_high_lane_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \ uint32x4_t __ret_597; \ uint32x4_t __s0_597 = __p0_597; \ uint16x8_t __s1_597 = __p1_597; \ uint16x4_t __s2_597 = __p2_597; \ __ret_597 = __s0_597 - vmull_u16(vget_high_u16(__s1_597), splat_lane_u16(__s2_597, __p3_597)); \ __ret_597; \ }) #else #define vmlsl_high_lane_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \ uint32x4_t __ret_598; \ uint32x4_t __s0_598 = __p0_598; \ uint16x8_t __s1_598 = __p1_598; \ uint16x4_t __s2_598 = __p2_598; \ uint32x4_t __rev0_598; __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \ uint16x8_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x4_t __rev2_598; __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 3, 2, 1, 0); \ __ret_598 = __rev0_598 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_598), __noswap_splat_lane_u16(__rev2_598, __p3_598)); \ __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \ __ret_598; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_high_lane_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \ int64x2_t __ret_599; \ int64x2_t __s0_599 = __p0_599; \ int32x4_t __s1_599 = __p1_599; \ int32x2_t __s2_599 = __p2_599; \ __ret_599 = __s0_599 - vmull_s32(vget_high_s32(__s1_599), splat_lane_s32(__s2_599, __p3_599)); \ __ret_599; \ }) #else #define vmlsl_high_lane_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \ int64x2_t __ret_600; \ int64x2_t __s0_600 = __p0_600; \ int32x4_t __s1_600 = __p1_600; \ int32x2_t __s2_600 = __p2_600; \ int64x2_t __rev0_600; __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \ int32x4_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \ int32x2_t __rev2_600; __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 1, 0); \ __ret_600 = __rev0_600 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_600), __noswap_splat_lane_s32(__rev2_600, __p3_600)); \ __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \ __ret_600; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_high_lane_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \ int32x4_t __ret_601; \ int32x4_t __s0_601 = __p0_601; \ int16x8_t __s1_601 = __p1_601; \ int16x4_t __s2_601 = __p2_601; \ __ret_601 = __s0_601 - vmull_s16(vget_high_s16(__s1_601), splat_lane_s16(__s2_601, __p3_601)); \ __ret_601; \ }) #else #define vmlsl_high_lane_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \ int32x4_t __ret_602; \ int32x4_t __s0_602 = __p0_602; \ int16x8_t __s1_602 = __p1_602; \ int16x4_t __s2_602 = __p2_602; \ int32x4_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \ int16x8_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_602; __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 3, 2, 1, 0); \ __ret_602 = 
__rev0_602 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_602), __noswap_splat_lane_s16(__rev2_602, __p3_602)); \ __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \ __ret_602; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_high_laneq_u32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \ uint64x2_t __ret_603; \ uint64x2_t __s0_603 = __p0_603; \ uint32x4_t __s1_603 = __p1_603; \ uint32x4_t __s2_603 = __p2_603; \ __ret_603 = __s0_603 - vmull_u32(vget_high_u32(__s1_603), splat_laneq_u32(__s2_603, __p3_603)); \ __ret_603; \ }) #else #define vmlsl_high_laneq_u32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \ uint64x2_t __ret_604; \ uint64x2_t __s0_604 = __p0_604; \ uint32x4_t __s1_604 = __p1_604; \ uint32x4_t __s2_604 = __p2_604; \ uint64x2_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \ uint32x4_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \ uint32x4_t __rev2_604; __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 3, 2, 1, 0); \ __ret_604 = __rev0_604 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_604), __noswap_splat_laneq_u32(__rev2_604, __p3_604)); \ __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \ __ret_604; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_high_laneq_u16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \ uint32x4_t __ret_605; \ uint32x4_t __s0_605 = __p0_605; \ uint16x8_t __s1_605 = __p1_605; \ uint16x8_t __s2_605 = __p2_605; \ __ret_605 = __s0_605 - vmull_u16(vget_high_u16(__s1_605), splat_laneq_u16(__s2_605, __p3_605)); \ __ret_605; \ }) #else #define vmlsl_high_laneq_u16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \ uint32x4_t __ret_606; \ uint32x4_t __s0_606 = __p0_606; \ uint16x8_t __s1_606 = __p1_606; \ uint16x8_t __s2_606 = __p2_606; \ uint32x4_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \ uint16x8_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev2_606; __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_606 = __rev0_606 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_606), __noswap_splat_laneq_u16(__rev2_606, __p3_606)); \ __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \ __ret_606; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_high_laneq_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \ int64x2_t __ret_607; \ int64x2_t __s0_607 = __p0_607; \ int32x4_t __s1_607 = __p1_607; \ int32x4_t __s2_607 = __p2_607; \ __ret_607 = __s0_607 - vmull_s32(vget_high_s32(__s1_607), splat_laneq_s32(__s2_607, __p3_607)); \ __ret_607; \ }) #else #define vmlsl_high_laneq_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \ int64x2_t __ret_608; \ int64x2_t __s0_608 = __p0_608; \ int32x4_t __s1_608 = __p1_608; \ int32x4_t __s2_608 = __p2_608; \ int64x2_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \ int32x4_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \ int32x4_t __rev2_608; __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \ __ret_608 = __rev0_608 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_608), __noswap_splat_laneq_s32(__rev2_608, __p3_608)); \ __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \ __ret_608; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_high_laneq_s16(__p0_609, __p1_609, __p2_609, __p3_609) 
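/* vmlsl_high_laneq_*(acc, b, v, lane): widening multiply-subtract that takes the
 * high half of b and selects the multiplier from a lane of the 128-bit vector v,
 * i.e. acc - vmull(vget_high(b), v[lane]).  Illustrative sketch only:
 *
 *   uint64x2_t acc;  uint32x4_t b, v;
 *   uint64x2_t r = vmlsl_high_laneq_u32(acc, b, v, 2);
 *   // r[i] = acc[i] - (uint64_t)b[i + 2] * v[2]
 */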
__extension__ ({ \ int32x4_t __ret_609; \ int32x4_t __s0_609 = __p0_609; \ int16x8_t __s1_609 = __p1_609; \ int16x8_t __s2_609 = __p2_609; \ __ret_609 = __s0_609 - vmull_s16(vget_high_s16(__s1_609), splat_laneq_s16(__s2_609, __p3_609)); \ __ret_609; \ }) #else #define vmlsl_high_laneq_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \ int32x4_t __ret_610; \ int32x4_t __s0_610 = __p0_610; \ int16x8_t __s1_610 = __p1_610; \ int16x8_t __s2_610 = __p2_610; \ int32x4_t __rev0_610; __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \ int16x8_t __rev1_610; __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_610; __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_610 = __rev0_610 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_610), __noswap_splat_laneq_s16(__rev2_610, __p3_610)); \ __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \ __ret_610; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_laneq_u32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \ uint64x2_t __ret_611; \ uint64x2_t __s0_611 = __p0_611; \ uint32x2_t __s1_611 = __p1_611; \ uint32x4_t __s2_611 = __p2_611; \ __ret_611 = __s0_611 - vmull_u32(__s1_611, splat_laneq_u32(__s2_611, __p3_611)); \ __ret_611; \ }) #else #define vmlsl_laneq_u32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \ uint64x2_t __ret_612; \ uint64x2_t __s0_612 = __p0_612; \ uint32x2_t __s1_612 = __p1_612; \ uint32x4_t __s2_612 = __p2_612; \ uint64x2_t __rev0_612; __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \ uint32x2_t __rev1_612; __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \ uint32x4_t __rev2_612; __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \ __ret_612 = __rev0_612 - __noswap_vmull_u32(__rev1_612, __noswap_splat_laneq_u32(__rev2_612, __p3_612)); \ __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \ __ret_612; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_laneq_u16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \ uint32x4_t __ret_613; \ uint32x4_t __s0_613 = __p0_613; \ uint16x4_t __s1_613 = __p1_613; \ uint16x8_t __s2_613 = __p2_613; \ __ret_613 = __s0_613 - vmull_u16(__s1_613, splat_laneq_u16(__s2_613, __p3_613)); \ __ret_613; \ }) #else #define vmlsl_laneq_u16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \ uint32x4_t __ret_614; \ uint32x4_t __s0_614 = __p0_614; \ uint16x4_t __s1_614 = __p1_614; \ uint16x8_t __s2_614 = __p2_614; \ uint32x4_t __rev0_614; __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \ uint16x4_t __rev1_614; __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \ uint16x8_t __rev2_614; __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_614 = __rev0_614 - __noswap_vmull_u16(__rev1_614, __noswap_splat_laneq_u16(__rev2_614, __p3_614)); \ __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \ __ret_614; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_laneq_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \ int64x2_t __ret_615; \ int64x2_t __s0_615 = __p0_615; \ int32x2_t __s1_615 = __p1_615; \ int32x4_t __s2_615 = __p2_615; \ __ret_615 = __s0_615 - vmull_s32(__s1_615, splat_laneq_s32(__s2_615, __p3_615)); \ __ret_615; \ }) #else #define vmlsl_laneq_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \ int64x2_t __ret_616; \ int64x2_t __s0_616 = __p0_616; \ int32x2_t 
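/* vmlsl_laneq_*(acc, b, v, lane): as above, but b is already a 64-bit vector, so
 * no high-half extraction is involved: acc - vmull(b, v[lane]).  For example
 * (illustrative only), vmlsl_laneq_s16(acc, b, v, 5) with int32x4_t acc,
 * int16x4_t b and int16x8_t v yields acc[i] - (int32_t)b[i] * v[5] in each lane.
 */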
__s1_616 = __p1_616; \ int32x4_t __s2_616 = __p2_616; \ int64x2_t __rev0_616; __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \ int32x2_t __rev1_616; __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 1, 0); \ int32x4_t __rev2_616; __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 3, 2, 1, 0); \ __ret_616 = __rev0_616 - __noswap_vmull_s32(__rev1_616, __noswap_splat_laneq_s32(__rev2_616, __p3_616)); \ __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \ __ret_616; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_laneq_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \ int32x4_t __ret_617; \ int32x4_t __s0_617 = __p0_617; \ int16x4_t __s1_617 = __p1_617; \ int16x8_t __s2_617 = __p2_617; \ __ret_617 = __s0_617 - vmull_s16(__s1_617, splat_laneq_s16(__s2_617, __p3_617)); \ __ret_617; \ }) #else #define vmlsl_laneq_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \ int32x4_t __ret_618; \ int32x4_t __s0_618 = __p0_618; \ int16x4_t __s1_618 = __p1_618; \ int16x8_t __s2_618 = __p2_618; \ int32x4_t __rev0_618; __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \ int16x4_t __rev1_618; __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 3, 2, 1, 0); \ int16x8_t __rev2_618; __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_618 = __rev0_618 - __noswap_vmull_s16(__rev1_618, __noswap_splat_laneq_s16(__rev2_618, __p3_618)); \ __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \ __ret_618; \ }) #endif __ai poly64x1_t vmov_n_p64(poly64_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vmovq_n_p64(poly64_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t) {__p0, __p0}; return __ret; } #else __ai poly64x2_t vmovq_n_p64(poly64_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vmovq_n_f64(float64_t __p0) { float64x2_t __ret; __ret = (float64x2_t) {__p0, __p0}; return __ret; } #else __ai float64x2_t vmovq_n_f64(float64_t __p0) { float64x2_t __ret; __ret = (float64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vmov_n_f64(float64_t __p0) { float64x1_t __ret; __ret = (float64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_619) { uint16x8_t __ret_619; uint8x8_t __a1_619 = vget_high_u8(__p0_619); __ret_619 = (uint16x8_t)(vshll_n_u8(__a1_619, 0)); return __ret_619; } #else __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_620) { uint16x8_t __ret_620; uint8x16_t __rev0_620; __rev0_620 = __builtin_shufflevector(__p0_620, __p0_620, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __a1_620 = __noswap_vget_high_u8(__rev0_620); __ret_620 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_620, 0)); __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 7, 6, 5, 4, 3, 2, 1, 0); return __ret_620; } __ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_621) { uint16x8_t __ret_621; uint8x8_t __a1_621 = __noswap_vget_high_u8(__p0_621); __ret_621 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_621, 0)); return __ret_621; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_622) { uint64x2_t __ret_622; uint32x2_t __a1_622 = vget_high_u32(__p0_622); __ret_622 = (uint64x2_t)(vshll_n_u32(__a1_622, 0)); return __ret_622; } #else __ai uint64x2_t 
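/* vmov_n_p64, vmovq_n_p64, vmovq_n_f64 and vmov_n_f64 above duplicate a scalar
 * into every lane.  The vmovl_high_* family widens the top half of a 128-bit
 * vector: the high 64 bits are taken with vget_high and zero- or sign-extended
 * to the next wider element type (implemented here as a shift-left-long by 0).
 * Illustrative sketch, assuming an initialized input vector:
 *
 *   uint8x16_t bytes;
 *   uint16x8_t hi = vmovl_high_u8(bytes);   // hi[i] = (uint16_t)bytes[i + 8]
 */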
vmovl_high_u32(uint32x4_t __p0_623) { uint64x2_t __ret_623; uint32x4_t __rev0_623; __rev0_623 = __builtin_shufflevector(__p0_623, __p0_623, 3, 2, 1, 0); uint32x2_t __a1_623 = __noswap_vget_high_u32(__rev0_623); __ret_623 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_623, 0)); __ret_623 = __builtin_shufflevector(__ret_623, __ret_623, 1, 0); return __ret_623; } __ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_624) { uint64x2_t __ret_624; uint32x2_t __a1_624 = __noswap_vget_high_u32(__p0_624); __ret_624 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_624, 0)); return __ret_624; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_625) { uint32x4_t __ret_625; uint16x4_t __a1_625 = vget_high_u16(__p0_625); __ret_625 = (uint32x4_t)(vshll_n_u16(__a1_625, 0)); return __ret_625; } #else __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_626) { uint32x4_t __ret_626; uint16x8_t __rev0_626; __rev0_626 = __builtin_shufflevector(__p0_626, __p0_626, 7, 6, 5, 4, 3, 2, 1, 0); uint16x4_t __a1_626 = __noswap_vget_high_u16(__rev0_626); __ret_626 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_626, 0)); __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); return __ret_626; } __ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_627) { uint32x4_t __ret_627; uint16x4_t __a1_627 = __noswap_vget_high_u16(__p0_627); __ret_627 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_627, 0)); return __ret_627; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmovl_high_s8(int8x16_t __p0_628) { int16x8_t __ret_628; int8x8_t __a1_628 = vget_high_s8(__p0_628); __ret_628 = (int16x8_t)(vshll_n_s8(__a1_628, 0)); return __ret_628; } #else __ai int16x8_t vmovl_high_s8(int8x16_t __p0_629) { int16x8_t __ret_629; int8x16_t __rev0_629; __rev0_629 = __builtin_shufflevector(__p0_629, __p0_629, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __a1_629 = __noswap_vget_high_s8(__rev0_629); __ret_629 = (int16x8_t)(__noswap_vshll_n_s8(__a1_629, 0)); __ret_629 = __builtin_shufflevector(__ret_629, __ret_629, 7, 6, 5, 4, 3, 2, 1, 0); return __ret_629; } __ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_630) { int16x8_t __ret_630; int8x8_t __a1_630 = __noswap_vget_high_s8(__p0_630); __ret_630 = (int16x8_t)(__noswap_vshll_n_s8(__a1_630, 0)); return __ret_630; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmovl_high_s32(int32x4_t __p0_631) { int64x2_t __ret_631; int32x2_t __a1_631 = vget_high_s32(__p0_631); __ret_631 = (int64x2_t)(vshll_n_s32(__a1_631, 0)); return __ret_631; } #else __ai int64x2_t vmovl_high_s32(int32x4_t __p0_632) { int64x2_t __ret_632; int32x4_t __rev0_632; __rev0_632 = __builtin_shufflevector(__p0_632, __p0_632, 3, 2, 1, 0); int32x2_t __a1_632 = __noswap_vget_high_s32(__rev0_632); __ret_632 = (int64x2_t)(__noswap_vshll_n_s32(__a1_632, 0)); __ret_632 = __builtin_shufflevector(__ret_632, __ret_632, 1, 0); return __ret_632; } __ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_633) { int64x2_t __ret_633; int32x2_t __a1_633 = __noswap_vget_high_s32(__p0_633); __ret_633 = (int64x2_t)(__noswap_vshll_n_s32(__a1_633, 0)); return __ret_633; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmovl_high_s16(int16x8_t __p0_634) { int32x4_t __ret_634; int16x4_t __a1_634 = vget_high_s16(__p0_634); __ret_634 = (int32x4_t)(vshll_n_s16(__a1_634, 0)); return __ret_634; } #else __ai int32x4_t vmovl_high_s16(int16x8_t __p0_635) { int32x4_t __ret_635; int16x8_t __rev0_635; __rev0_635 = __builtin_shufflevector(__p0_635, __p0_635, 7, 6, 5, 4, 3, 2, 1, 0); int16x4_t __a1_635 = 
__noswap_vget_high_s16(__rev0_635); __ret_635 = (int32x4_t)(__noswap_vshll_n_s16(__a1_635, 0)); __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 3, 2, 1, 0); return __ret_635; } __ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_636) { int32x4_t __ret_636; int16x4_t __a1_636 = __noswap_vget_high_s16(__p0_636); __ret_636 = (int32x4_t)(__noswap_vshll_n_s16(__a1_636, 0)); return __ret_636; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vmovn_u32(__p1)); return __ret; } #else __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vmovn_u64(__p1)); return __ret; } #else __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vmovn_u16(__p1)); return __ret; } #else __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; __ret = vcombine_s16(__p0, vmovn_s32(__p1)); return __ret; } #else __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { int32x4_t __ret; __ret = vcombine_s32(__p0, vmovn_s64(__p1)); return __ret; } #else __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; __ret = vcombine_s8(__p0, vmovn_s16(__p1)); return __ret; } #else __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; int8x8_t __rev0; __rev0 = 
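/* vmovn_high_*(low, wide) narrows each element of `wide` and packs the result
 * into the high half of the return value, with `low` occupying the low half,
 * i.e. vcombine(low, vmovn(wide)).  Illustrative sketch only:
 *
 *   uint16x4_t low;  uint32x4_t wide;
 *   uint16x8_t r = vmovn_high_u32(low, wide);
 *   // r = { low[0..3], (uint16_t)wide[0..3] }
 */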
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __p0 * __p1; return __ret; } #else __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = __p0 * __p1; return __ret; } #define vmuld_lane_f64(__p0_637, __p1_637, __p2_637) __extension__ ({ \ float64_t __ret_637; \ float64_t __s0_637 = __p0_637; \ float64x1_t __s1_637 = __p1_637; \ __ret_637 = __s0_637 * vget_lane_f64(__s1_637, __p2_637); \ __ret_637; \ }) #ifdef __LITTLE_ENDIAN__ #define vmuls_lane_f32(__p0_638, __p1_638, __p2_638) __extension__ ({ \ float32_t __ret_638; \ float32_t __s0_638 = __p0_638; \ float32x2_t __s1_638 = __p1_638; \ __ret_638 = __s0_638 * vget_lane_f32(__s1_638, __p2_638); \ __ret_638; \ }) #else #define vmuls_lane_f32(__p0_639, __p1_639, __p2_639) __extension__ ({ \ float32_t __ret_639; \ float32_t __s0_639 = __p0_639; \ float32x2_t __s1_639 = __p1_639; \ float32x2_t __rev1_639; __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 1, 0); \ __ret_639 = __s0_639 * __noswap_vget_lane_f32(__rev1_639, __p2_639); \ __ret_639; \ }) #endif #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ float64x1_t __s1 = __p1; \ __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vmulq_lane_f64(__p0_640, __p1_640, __p2_640) __extension__ ({ \ float64x2_t __ret_640; \ float64x2_t __s0_640 = __p0_640; \ float64x1_t __s1_640 = __p1_640; \ __ret_640 = __s0_640 * splatq_lane_f64(__s1_640, __p2_640); \ __ret_640; \ }) #else #define vmulq_lane_f64(__p0_641, __p1_641, __p2_641) __extension__ ({ \ float64x2_t __ret_641; \ float64x2_t __s0_641 = __p0_641; \ float64x1_t __s1_641 = __p1_641; \ float64x2_t __rev0_641; __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 1, 0); \ __ret_641 = __rev0_641 * __noswap_splatq_lane_f64(__s1_641, __p2_641); \ __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 1, 0); \ __ret_641; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmuld_laneq_f64(__p0_642, __p1_642, __p2_642) __extension__ ({ \ float64_t __ret_642; \ float64_t __s0_642 = __p0_642; \ float64x2_t __s1_642 = __p1_642; \ __ret_642 = __s0_642 * vgetq_lane_f64(__s1_642, __p2_642); \ __ret_642; \ }) #else #define vmuld_laneq_f64(__p0_643, __p1_643, __p2_643) __extension__ ({ \ float64_t __ret_643; \ float64_t __s0_643 = __p0_643; \ float64x2_t __s1_643 = __p1_643; \ float64x2_t __rev1_643; __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 1, 0); \ __ret_643 = __s0_643 * __noswap_vgetq_lane_f64(__rev1_643, __p2_643); \ __ret_643; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmuls_laneq_f32(__p0_644, __p1_644, __p2_644) __extension__ ({ \ float32_t __ret_644; \ float32_t __s0_644 = __p0_644; \ float32x4_t __s1_644 = __p1_644; \ __ret_644 = 
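/* vmuld_lane_f64, vmuls_lane_f32 and their laneq forms multiply a scalar by one
 * lane of a vector; vmulq_lane_f64 and the vmulq_laneq_* forms that follow
 * multiply every element of a vector by a broadcast lane.  Illustrative sketch:
 *
 *   float32_t s;  float32x4_t v;
 *   float32_t r = vmuls_laneq_f32(s, v, 1);   // r = s * v[1]
 */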
__s0_644 * vgetq_lane_f32(__s1_644, __p2_644); \ __ret_644; \ }) #else #define vmuls_laneq_f32(__p0_645, __p1_645, __p2_645) __extension__ ({ \ float32_t __ret_645; \ float32_t __s0_645 = __p0_645; \ float32x4_t __s1_645 = __p1_645; \ float32x4_t __rev1_645; __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 3, 2, 1, 0); \ __ret_645 = __s0_645 * __noswap_vgetq_lane_f32(__rev1_645, __p2_645); \ __ret_645; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \ __ret; \ }) #else #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1_t __ret; \ float64x1_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_laneq_u32(__p0_646, __p1_646, __p2_646) __extension__ ({ \ uint32x4_t __ret_646; \ uint32x4_t __s0_646 = __p0_646; \ uint32x4_t __s1_646 = __p1_646; \ __ret_646 = __s0_646 * splatq_laneq_u32(__s1_646, __p2_646); \ __ret_646; \ }) #else #define vmulq_laneq_u32(__p0_647, __p1_647, __p2_647) __extension__ ({ \ uint32x4_t __ret_647; \ uint32x4_t __s0_647 = __p0_647; \ uint32x4_t __s1_647 = __p1_647; \ uint32x4_t __rev0_647; __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 3, 2, 1, 0); \ uint32x4_t __rev1_647; __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \ __ret_647 = __rev0_647 * __noswap_splatq_laneq_u32(__rev1_647, __p2_647); \ __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 3, 2, 1, 0); \ __ret_647; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_laneq_u16(__p0_648, __p1_648, __p2_648) __extension__ ({ \ uint16x8_t __ret_648; \ uint16x8_t __s0_648 = __p0_648; \ uint16x8_t __s1_648 = __p1_648; \ __ret_648 = __s0_648 * splatq_laneq_u16(__s1_648, __p2_648); \ __ret_648; \ }) #else #define vmulq_laneq_u16(__p0_649, __p1_649, __p2_649) __extension__ ({ \ uint16x8_t __ret_649; \ uint16x8_t __s0_649 = __p0_649; \ uint16x8_t __s1_649 = __p1_649; \ uint16x8_t __rev0_649; __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_649; __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_649 = __rev0_649 * __noswap_splatq_laneq_u16(__rev1_649, __p2_649); \ __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_649; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_laneq_f64(__p0_650, __p1_650, __p2_650) __extension__ ({ \ float64x2_t __ret_650; \ float64x2_t __s0_650 = __p0_650; \ float64x2_t __s1_650 = __p1_650; \ __ret_650 = __s0_650 * splatq_laneq_f64(__s1_650, __p2_650); \ __ret_650; \ }) #else #define vmulq_laneq_f64(__p0_651, __p1_651, __p2_651) __extension__ ({ \ float64x2_t __ret_651; \ float64x2_t __s0_651 = __p0_651; \ float64x2_t __s1_651 = __p1_651; \ float64x2_t __rev0_651; __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 1, 0); \ float64x2_t __rev1_651; __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 1, 0); \ __ret_651 = __rev0_651 * __noswap_splatq_laneq_f64(__rev1_651, __p2_651); \ __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 1, 0); \ __ret_651; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_laneq_f32(__p0_652, __p1_652, __p2_652) __extension__ ({ 
\ float32x4_t __ret_652; \ float32x4_t __s0_652 = __p0_652; \ float32x4_t __s1_652 = __p1_652; \ __ret_652 = __s0_652 * splatq_laneq_f32(__s1_652, __p2_652); \ __ret_652; \ }) #else #define vmulq_laneq_f32(__p0_653, __p1_653, __p2_653) __extension__ ({ \ float32x4_t __ret_653; \ float32x4_t __s0_653 = __p0_653; \ float32x4_t __s1_653 = __p1_653; \ float32x4_t __rev0_653; __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \ float32x4_t __rev1_653; __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 3, 2, 1, 0); \ __ret_653 = __rev0_653 * __noswap_splatq_laneq_f32(__rev1_653, __p2_653); \ __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 3, 2, 1, 0); \ __ret_653; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_laneq_s32(__p0_654, __p1_654, __p2_654) __extension__ ({ \ int32x4_t __ret_654; \ int32x4_t __s0_654 = __p0_654; \ int32x4_t __s1_654 = __p1_654; \ __ret_654 = __s0_654 * splatq_laneq_s32(__s1_654, __p2_654); \ __ret_654; \ }) #else #define vmulq_laneq_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \ int32x4_t __ret_655; \ int32x4_t __s0_655 = __p0_655; \ int32x4_t __s1_655 = __p1_655; \ int32x4_t __rev0_655; __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 3, 2, 1, 0); \ int32x4_t __rev1_655; __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \ __ret_655 = __rev0_655 * __noswap_splatq_laneq_s32(__rev1_655, __p2_655); \ __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \ __ret_655; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulq_laneq_s16(__p0_656, __p1_656, __p2_656) __extension__ ({ \ int16x8_t __ret_656; \ int16x8_t __s0_656 = __p0_656; \ int16x8_t __s1_656 = __p1_656; \ __ret_656 = __s0_656 * splatq_laneq_s16(__s1_656, __p2_656); \ __ret_656; \ }) #else #define vmulq_laneq_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \ int16x8_t __ret_657; \ int16x8_t __s0_657 = __p0_657; \ int16x8_t __s1_657 = __p1_657; \ int16x8_t __rev0_657; __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_657; __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_657 = __rev0_657 * __noswap_splatq_laneq_s16(__rev1_657, __p2_657); \ __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_657; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_laneq_u32(__p0_658, __p1_658, __p2_658) __extension__ ({ \ uint32x2_t __ret_658; \ uint32x2_t __s0_658 = __p0_658; \ uint32x4_t __s1_658 = __p1_658; \ __ret_658 = __s0_658 * splat_laneq_u32(__s1_658, __p2_658); \ __ret_658; \ }) #else #define vmul_laneq_u32(__p0_659, __p1_659, __p2_659) __extension__ ({ \ uint32x2_t __ret_659; \ uint32x2_t __s0_659 = __p0_659; \ uint32x4_t __s1_659 = __p1_659; \ uint32x2_t __rev0_659; __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 1, 0); \ uint32x4_t __rev1_659; __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \ __ret_659 = __rev0_659 * __noswap_splat_laneq_u32(__rev1_659, __p2_659); \ __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 1, 0); \ __ret_659; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_laneq_u16(__p0_660, __p1_660, __p2_660) __extension__ ({ \ uint16x4_t __ret_660; \ uint16x4_t __s0_660 = __p0_660; \ uint16x8_t __s1_660 = __p1_660; \ __ret_660 = __s0_660 * splat_laneq_u16(__s1_660, __p2_660); \ __ret_660; \ }) #else #define vmul_laneq_u16(__p0_661, __p1_661, __p2_661) __extension__ ({ \ uint16x4_t __ret_661; \ uint16x4_t __s0_661 = __p0_661; \ uint16x8_t __s1_661 = 
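/* vmul_laneq_* is the 64-bit-vector counterpart of vmulq_laneq_*: each element
 * of a 64-bit vector is multiplied by the selected lane of a 128-bit vector.
 * Illustrative sketch only:
 *
 *   int32x2_t a;  int32x4_t v;
 *   int32x2_t r = vmul_laneq_s32(a, v, 3);   // r[i] = a[i] * v[3]
 */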
__p1_661; \ uint16x4_t __rev0_661; __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \ uint16x8_t __rev1_661; __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_661 = __rev0_661 * __noswap_splat_laneq_u16(__rev1_661, __p2_661); \ __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 3, 2, 1, 0); \ __ret_661; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_laneq_f32(__p0_662, __p1_662, __p2_662) __extension__ ({ \ float32x2_t __ret_662; \ float32x2_t __s0_662 = __p0_662; \ float32x4_t __s1_662 = __p1_662; \ __ret_662 = __s0_662 * splat_laneq_f32(__s1_662, __p2_662); \ __ret_662; \ }) #else #define vmul_laneq_f32(__p0_663, __p1_663, __p2_663) __extension__ ({ \ float32x2_t __ret_663; \ float32x2_t __s0_663 = __p0_663; \ float32x4_t __s1_663 = __p1_663; \ float32x2_t __rev0_663; __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 1, 0); \ float32x4_t __rev1_663; __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 3, 2, 1, 0); \ __ret_663 = __rev0_663 * __noswap_splat_laneq_f32(__rev1_663, __p2_663); \ __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 1, 0); \ __ret_663; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ int32x2_t __ret_664; \ int32x2_t __s0_664 = __p0_664; \ int32x4_t __s1_664 = __p1_664; \ __ret_664 = __s0_664 * splat_laneq_s32(__s1_664, __p2_664); \ __ret_664; \ }) #else #define vmul_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \ int32x2_t __ret_665; \ int32x2_t __s0_665 = __p0_665; \ int32x4_t __s1_665 = __p1_665; \ int32x2_t __rev0_665; __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 1, 0); \ int32x4_t __rev1_665; __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \ __ret_665 = __rev0_665 * __noswap_splat_laneq_s32(__rev1_665, __p2_665); \ __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \ __ret_665; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmul_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ int16x4_t __ret_666; \ int16x4_t __s0_666 = __p0_666; \ int16x8_t __s1_666 = __p1_666; \ __ret_666 = __s0_666 * splat_laneq_s16(__s1_666, __p2_666); \ __ret_666; \ }) #else #define vmul_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \ int16x4_t __ret_667; \ int16x4_t __s0_667 = __p0_667; \ int16x8_t __s1_667 = __p1_667; \ int16x4_t __rev0_667; __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 3, 2, 1, 0); \ int16x8_t __rev1_667; __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_667 = __rev0_667 * __noswap_splat_laneq_s16(__rev1_667, __p2_667); \ __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \ __ret_667; \ }) #endif __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { float64x2_t __ret; __ret = __p0 * (float64x2_t) {__p1, __p1}; return __ret; } #else __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __rev0 * (float64x2_t) {__p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { poly128_t __ret; __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); return __ret; } #ifdef 
__LITTLE_ENDIAN__ __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { poly16x8_t __ret; __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); return __ret; } #else __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { poly16x8_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); return __ret; } #else __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); return __ret; } #else __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); return __ret; } #else __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); return __ret; } #else __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return __ret; } #else __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { 
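/* vmull_p64 above maps poly64 x poly64 to a poly128_t product.  The
 * vmull_high_* family in this block performs a widening multiply of the high
 * halves of its two 128-bit operands, i.e. vmull(vget_high(a), vget_high(b)),
 * with vmull_high_p64 as the polynomial form.  Illustrative sketch:
 *
 *   int16x8_t a, b;
 *   int32x4_t r = vmull_high_s16(a, b);   // r[i] = (int32_t)a[i + 4] * b[i + 4]
 */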
int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { poly128_t __ret; __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); return __ret; } #else __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { poly128_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmull_high_lane_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ uint64x2_t __ret_668; \ uint32x4_t __s0_668 = __p0_668; \ uint32x2_t __s1_668 = __p1_668; \ __ret_668 = vmull_u32(vget_high_u32(__s0_668), splat_lane_u32(__s1_668, __p2_668)); \ __ret_668; \ }) #else #define vmull_high_lane_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \ uint64x2_t __ret_669; \ uint32x4_t __s0_669 = __p0_669; \ uint32x2_t __s1_669 = __p1_669; \ uint32x4_t __rev0_669; __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 3, 2, 1, 0); \ uint32x2_t __rev1_669; __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \ __ret_669 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_669), __noswap_splat_lane_u32(__rev1_669, __p2_669)); \ __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \ __ret_669; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_high_lane_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \ uint32x4_t __ret_670; \ uint16x8_t __s0_670 = __p0_670; \ uint16x4_t __s1_670 = __p1_670; \ __ret_670 = vmull_u16(vget_high_u16(__s0_670), splat_lane_u16(__s1_670, __p2_670)); \ __ret_670; \ }) #else #define vmull_high_lane_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ uint32x4_t __ret_671; \ uint16x8_t __s0_671 = __p0_671; \ uint16x4_t __s1_671 = __p1_671; \ uint16x8_t __rev0_671; __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x4_t __rev1_671; __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \ __ret_671 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_671), __noswap_splat_lane_u16(__rev1_671, __p2_671)); \ __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \ __ret_671; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_high_lane_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \ int64x2_t __ret_672; \ int32x4_t __s0_672 = __p0_672; \ int32x2_t __s1_672 = __p1_672; \ __ret_672 = vmull_s32(vget_high_s32(__s0_672), splat_lane_s32(__s1_672, __p2_672)); \ 
__ret_672; \ }) #else #define vmull_high_lane_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ int64x2_t __ret_673; \ int32x4_t __s0_673 = __p0_673; \ int32x2_t __s1_673 = __p1_673; \ int32x4_t __rev0_673; __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 3, 2, 1, 0); \ int32x2_t __rev1_673; __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 1, 0); \ __ret_673 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_673), __noswap_splat_lane_s32(__rev1_673, __p2_673)); \ __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \ __ret_673; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_high_lane_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \ int32x4_t __ret_674; \ int16x8_t __s0_674 = __p0_674; \ int16x4_t __s1_674 = __p1_674; \ __ret_674 = vmull_s16(vget_high_s16(__s0_674), splat_lane_s16(__s1_674, __p2_674)); \ __ret_674; \ }) #else #define vmull_high_lane_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \ int32x4_t __ret_675; \ int16x8_t __s0_675 = __p0_675; \ int16x4_t __s1_675 = __p1_675; \ int16x8_t __rev0_675; __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev1_675; __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 3, 2, 1, 0); \ __ret_675 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_675), __noswap_splat_lane_s16(__rev1_675, __p2_675)); \ __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \ __ret_675; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_high_laneq_u32(__p0_676, __p1_676, __p2_676) __extension__ ({ \ uint64x2_t __ret_676; \ uint32x4_t __s0_676 = __p0_676; \ uint32x4_t __s1_676 = __p1_676; \ __ret_676 = vmull_u32(vget_high_u32(__s0_676), splat_laneq_u32(__s1_676, __p2_676)); \ __ret_676; \ }) #else #define vmull_high_laneq_u32(__p0_677, __p1_677, __p2_677) __extension__ ({ \ uint64x2_t __ret_677; \ uint32x4_t __s0_677 = __p0_677; \ uint32x4_t __s1_677 = __p1_677; \ uint32x4_t __rev0_677; __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 3, 2, 1, 0); \ uint32x4_t __rev1_677; __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \ __ret_677 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_677), __noswap_splat_laneq_u32(__rev1_677, __p2_677)); \ __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \ __ret_677; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_high_laneq_u16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ uint32x4_t __ret_678; \ uint16x8_t __s0_678 = __p0_678; \ uint16x8_t __s1_678 = __p1_678; \ __ret_678 = vmull_u16(vget_high_u16(__s0_678), splat_laneq_u16(__s1_678, __p2_678)); \ __ret_678; \ }) #else #define vmull_high_laneq_u16(__p0_679, __p1_679, __p2_679) __extension__ ({ \ uint32x4_t __ret_679; \ uint16x8_t __s0_679 = __p0_679; \ uint16x8_t __s1_679 = __p1_679; \ uint16x8_t __rev0_679; __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_679; __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_679 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_679), __noswap_splat_laneq_u16(__rev1_679, __p2_679)); \ __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \ __ret_679; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_high_laneq_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \ int64x2_t __ret_680; \ int32x4_t __s0_680 = __p0_680; \ int32x4_t __s1_680 = __p1_680; \ __ret_680 = vmull_s32(vget_high_s32(__s0_680), splat_laneq_s32(__s1_680, __p2_680)); \ __ret_680; \ }) #else #define 
vmull_high_laneq_s32(__p0_681, __p1_681, __p2_681) __extension__ ({ \ int64x2_t __ret_681; \ int32x4_t __s0_681 = __p0_681; \ int32x4_t __s1_681 = __p1_681; \ int32x4_t __rev0_681; __rev0_681 = __builtin_shufflevector(__s0_681, __s0_681, 3, 2, 1, 0); \ int32x4_t __rev1_681; __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 3, 2, 1, 0); \ __ret_681 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_681), __noswap_splat_laneq_s32(__rev1_681, __p2_681)); \ __ret_681 = __builtin_shufflevector(__ret_681, __ret_681, 1, 0); \ __ret_681; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_high_laneq_s16(__p0_682, __p1_682, __p2_682) __extension__ ({ \ int32x4_t __ret_682; \ int16x8_t __s0_682 = __p0_682; \ int16x8_t __s1_682 = __p1_682; \ __ret_682 = vmull_s16(vget_high_s16(__s0_682), splat_laneq_s16(__s1_682, __p2_682)); \ __ret_682; \ }) #else #define vmull_high_laneq_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ int32x4_t __ret_683; \ int16x8_t __s0_683 = __p0_683; \ int16x8_t __s1_683 = __p1_683; \ int16x8_t __rev0_683; __rev0_683 = __builtin_shufflevector(__s0_683, __s0_683, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_683; __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_683 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_683), __noswap_splat_laneq_s16(__rev1_683, __p2_683)); \ __ret_683 = __builtin_shufflevector(__ret_683, __ret_683, 3, 2, 1, 0); \ __ret_683; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { uint64x2_t __ret; __ret = vmull_n_u32(vget_high_u32(__p0), __p1); return __ret; } #else __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { uint32x4_t __ret; __ret = vmull_n_u16(vget_high_u16(__p0), __p1); return __ret; } #else __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; __ret = vmull_n_s32(vget_high_s32(__p0), __p1); return __ret; } #else __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; __ret = vmull_n_s16(vget_high_s16(__p0), __p1); return __ret; } #else __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmull_laneq_u32(__p0_684, __p1_684, __p2_684) __extension__ ({ \ uint64x2_t __ret_684; \ uint32x2_t __s0_684 = 
__p0_684; \ uint32x4_t __s1_684 = __p1_684; \ __ret_684 = vmull_u32(__s0_684, splat_laneq_u32(__s1_684, __p2_684)); \ __ret_684; \ }) #else #define vmull_laneq_u32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ uint64x2_t __ret_685; \ uint32x2_t __s0_685 = __p0_685; \ uint32x4_t __s1_685 = __p1_685; \ uint32x2_t __rev0_685; __rev0_685 = __builtin_shufflevector(__s0_685, __s0_685, 1, 0); \ uint32x4_t __rev1_685; __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \ __ret_685 = __noswap_vmull_u32(__rev0_685, __noswap_splat_laneq_u32(__rev1_685, __p2_685)); \ __ret_685 = __builtin_shufflevector(__ret_685, __ret_685, 1, 0); \ __ret_685; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_laneq_u16(__p0_686, __p1_686, __p2_686) __extension__ ({ \ uint32x4_t __ret_686; \ uint16x4_t __s0_686 = __p0_686; \ uint16x8_t __s1_686 = __p1_686; \ __ret_686 = vmull_u16(__s0_686, splat_laneq_u16(__s1_686, __p2_686)); \ __ret_686; \ }) #else #define vmull_laneq_u16(__p0_687, __p1_687, __p2_687) __extension__ ({ \ uint32x4_t __ret_687; \ uint16x4_t __s0_687 = __p0_687; \ uint16x8_t __s1_687 = __p1_687; \ uint16x4_t __rev0_687; __rev0_687 = __builtin_shufflevector(__s0_687, __s0_687, 3, 2, 1, 0); \ uint16x8_t __rev1_687; __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_687 = __noswap_vmull_u16(__rev0_687, __noswap_splat_laneq_u16(__rev1_687, __p2_687)); \ __ret_687 = __builtin_shufflevector(__ret_687, __ret_687, 3, 2, 1, 0); \ __ret_687; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_laneq_s32(__p0_688, __p1_688, __p2_688) __extension__ ({ \ int64x2_t __ret_688; \ int32x2_t __s0_688 = __p0_688; \ int32x4_t __s1_688 = __p1_688; \ __ret_688 = vmull_s32(__s0_688, splat_laneq_s32(__s1_688, __p2_688)); \ __ret_688; \ }) #else #define vmull_laneq_s32(__p0_689, __p1_689, __p2_689) __extension__ ({ \ int64x2_t __ret_689; \ int32x2_t __s0_689 = __p0_689; \ int32x4_t __s1_689 = __p1_689; \ int32x2_t __rev0_689; __rev0_689 = __builtin_shufflevector(__s0_689, __s0_689, 1, 0); \ int32x4_t __rev1_689; __rev1_689 = __builtin_shufflevector(__s1_689, __s1_689, 3, 2, 1, 0); \ __ret_689 = __noswap_vmull_s32(__rev0_689, __noswap_splat_laneq_s32(__rev1_689, __p2_689)); \ __ret_689 = __builtin_shufflevector(__ret_689, __ret_689, 1, 0); \ __ret_689; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmull_laneq_s16(__p0_690, __p1_690, __p2_690) __extension__ ({ \ int32x4_t __ret_690; \ int16x4_t __s0_690 = __p0_690; \ int16x8_t __s1_690 = __p1_690; \ __ret_690 = vmull_s16(__s0_690, splat_laneq_s16(__s1_690, __p2_690)); \ __ret_690; \ }) #else #define vmull_laneq_s16(__p0_691, __p1_691, __p2_691) __extension__ ({ \ int32x4_t __ret_691; \ int16x4_t __s0_691 = __p0_691; \ int16x8_t __s1_691 = __p1_691; \ int16x4_t __rev0_691; __rev0_691 = __builtin_shufflevector(__s0_691, __s0_691, 3, 2, 1, 0); \ int16x8_t __rev1_691; __rev1_691 = __builtin_shufflevector(__s1_691, __s1_691, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_691 = __noswap_vmull_s16(__rev0_691, __noswap_splat_laneq_s16(__rev1_691, __p2_691)); \ __ret_691 = __builtin_shufflevector(__ret_691, __ret_691, 3, 2, 1, 0); \ __ret_691; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; 
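/* vmulx_* maps to the AArch64 FMULX instruction: an ordinary floating-point
 * multiply except that (+/-)0 times (+/-)infinity returns (+/-)2.0 rather than a
 * NaN.  Scalar forms (vmulxd_f64, vmulxs_f32) and by-lane forms follow.
 * Illustrative sketch, assuming initialized operands:
 *
 *   float32x4_t a, b;
 *   float32x4_t r = vmulxq_f32(a, b);   // r[i] = FMULX(a[i], b[i])
 */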
__rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #endif __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #endif __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { float64_t __ret; __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); return __ret; } __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); return __ret; } #define vmulxd_lane_f64(__p0_692, __p1_692, __p2_692) __extension__ ({ \ float64_t __ret_692; \ float64_t __s0_692 = __p0_692; \ float64x1_t __s1_692 = __p1_692; \ __ret_692 = vmulxd_f64(__s0_692, vget_lane_f64(__s1_692, __p2_692)); \ __ret_692; \ }) #ifdef __LITTLE_ENDIAN__ #define vmulxs_lane_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \ float32_t __ret_693; \ float32_t __s0_693 = __p0_693; \ float32x2_t __s1_693 = __p1_693; \ __ret_693 = vmulxs_f32(__s0_693, vget_lane_f32(__s1_693, __p2_693)); \ __ret_693; \ }) #else #define vmulxs_lane_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \ float32_t __ret_694; \ float32_t __s0_694 = __p0_694; \ float32x2_t __s1_694 = __p1_694; \ float32x2_t __rev1_694; __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \ __ret_694 = vmulxs_f32(__s0_694, __noswap_vget_lane_f32(__rev1_694, __p2_694)); \ __ret_694; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxq_lane_f64(__p0_695, __p1_695, __p2_695) __extension__ ({ \ float64x2_t __ret_695; \ float64x2_t __s0_695 = 
__p0_695; \ float64x1_t __s1_695 = __p1_695; \ __ret_695 = vmulxq_f64(__s0_695, splatq_lane_f64(__s1_695, __p2_695)); \ __ret_695; \ }) #else #define vmulxq_lane_f64(__p0_696, __p1_696, __p2_696) __extension__ ({ \ float64x2_t __ret_696; \ float64x2_t __s0_696 = __p0_696; \ float64x1_t __s1_696 = __p1_696; \ float64x2_t __rev0_696; __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \ __ret_696 = __noswap_vmulxq_f64(__rev0_696, __noswap_splatq_lane_f64(__s1_696, __p2_696)); \ __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \ __ret_696; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxq_lane_f32(__p0_697, __p1_697, __p2_697) __extension__ ({ \ float32x4_t __ret_697; \ float32x4_t __s0_697 = __p0_697; \ float32x2_t __s1_697 = __p1_697; \ __ret_697 = vmulxq_f32(__s0_697, splatq_lane_f32(__s1_697, __p2_697)); \ __ret_697; \ }) #else #define vmulxq_lane_f32(__p0_698, __p1_698, __p2_698) __extension__ ({ \ float32x4_t __ret_698; \ float32x4_t __s0_698 = __p0_698; \ float32x2_t __s1_698 = __p1_698; \ float32x4_t __rev0_698; __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \ float32x2_t __rev1_698; __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 1, 0); \ __ret_698 = __noswap_vmulxq_f32(__rev0_698, __noswap_splatq_lane_f32(__rev1_698, __p2_698)); \ __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \ __ret_698; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulx_lane_f32(__p0_699, __p1_699, __p2_699) __extension__ ({ \ float32x2_t __ret_699; \ float32x2_t __s0_699 = __p0_699; \ float32x2_t __s1_699 = __p1_699; \ __ret_699 = vmulx_f32(__s0_699, splat_lane_f32(__s1_699, __p2_699)); \ __ret_699; \ }) #else #define vmulx_lane_f32(__p0_700, __p1_700, __p2_700) __extension__ ({ \ float32x2_t __ret_700; \ float32x2_t __s0_700 = __p0_700; \ float32x2_t __s1_700 = __p1_700; \ float32x2_t __rev0_700; __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \ float32x2_t __rev1_700; __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 1, 0); \ __ret_700 = __noswap_vmulx_f32(__rev0_700, __noswap_splat_lane_f32(__rev1_700, __p2_700)); \ __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \ __ret_700; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxd_laneq_f64(__p0_701, __p1_701, __p2_701) __extension__ ({ \ float64_t __ret_701; \ float64_t __s0_701 = __p0_701; \ float64x2_t __s1_701 = __p1_701; \ __ret_701 = vmulxd_f64(__s0_701, vgetq_lane_f64(__s1_701, __p2_701)); \ __ret_701; \ }) #else #define vmulxd_laneq_f64(__p0_702, __p1_702, __p2_702) __extension__ ({ \ float64_t __ret_702; \ float64_t __s0_702 = __p0_702; \ float64x2_t __s1_702 = __p1_702; \ float64x2_t __rev1_702; __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \ __ret_702 = vmulxd_f64(__s0_702, __noswap_vgetq_lane_f64(__rev1_702, __p2_702)); \ __ret_702; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxs_laneq_f32(__p0_703, __p1_703, __p2_703) __extension__ ({ \ float32_t __ret_703; \ float32_t __s0_703 = __p0_703; \ float32x4_t __s1_703 = __p1_703; \ __ret_703 = vmulxs_f32(__s0_703, vgetq_lane_f32(__s1_703, __p2_703)); \ __ret_703; \ }) #else #define vmulxs_laneq_f32(__p0_704, __p1_704, __p2_704) __extension__ ({ \ float32_t __ret_704; \ float32_t __s0_704 = __p0_704; \ float32x4_t __s1_704 = __p1_704; \ float32x4_t __rev1_704; __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \ __ret_704 = vmulxs_f32(__s0_704, __noswap_vgetq_lane_f32(__rev1_704, __p2_704)); \ __ret_704; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
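/* The remaining vmulx forms take the second operand from a vector lane, e.g.
 * vmulxq_laneq_f32(a, v, lane) computes FMULX(a[i], v[lane]) in every lane.
 * Illustrative sketch only:
 *
 *   float32x4_t a, v;
 *   float32x4_t r = vmulxq_laneq_f32(a, v, 2);
 */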
#define vmulxq_laneq_f64(__p0_705, __p1_705, __p2_705) __extension__ ({ \ float64x2_t __ret_705; \ float64x2_t __s0_705 = __p0_705; \ float64x2_t __s1_705 = __p1_705; \ __ret_705 = vmulxq_f64(__s0_705, splatq_laneq_f64(__s1_705, __p2_705)); \ __ret_705; \ }) #else #define vmulxq_laneq_f64(__p0_706, __p1_706, __p2_706) __extension__ ({ \ float64x2_t __ret_706; \ float64x2_t __s0_706 = __p0_706; \ float64x2_t __s1_706 = __p1_706; \ float64x2_t __rev0_706; __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \ float64x2_t __rev1_706; __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 1, 0); \ __ret_706 = __noswap_vmulxq_f64(__rev0_706, __noswap_splatq_laneq_f64(__rev1_706, __p2_706)); \ __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 1, 0); \ __ret_706; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulxq_laneq_f32(__p0_707, __p1_707, __p2_707) __extension__ ({ \ float32x4_t __ret_707; \ float32x4_t __s0_707 = __p0_707; \ float32x4_t __s1_707 = __p1_707; \ __ret_707 = vmulxq_f32(__s0_707, splatq_laneq_f32(__s1_707, __p2_707)); \ __ret_707; \ }) #else #define vmulxq_laneq_f32(__p0_708, __p1_708, __p2_708) __extension__ ({ \ float32x4_t __ret_708; \ float32x4_t __s0_708 = __p0_708; \ float32x4_t __s1_708 = __p1_708; \ float32x4_t __rev0_708; __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 3, 2, 1, 0); \ float32x4_t __rev1_708; __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \ __ret_708 = __noswap_vmulxq_f32(__rev0_708, __noswap_splatq_laneq_f32(__rev1_708, __p2_708)); \ __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 3, 2, 1, 0); \ __ret_708; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulx_laneq_f32(__p0_709, __p1_709, __p2_709) __extension__ ({ \ float32x2_t __ret_709; \ float32x2_t __s0_709 = __p0_709; \ float32x4_t __s1_709 = __p1_709; \ __ret_709 = vmulx_f32(__s0_709, splat_laneq_f32(__s1_709, __p2_709)); \ __ret_709; \ }) #else #define vmulx_laneq_f32(__p0_710, __p1_710, __p2_710) __extension__ ({ \ float32x2_t __ret_710; \ float32x2_t __s0_710 = __p0_710; \ float32x4_t __s1_710 = __p1_710; \ float32x2_t __rev0_710; __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 1, 0); \ float32x4_t __rev1_710; __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 3, 2, 1, 0); \ __ret_710 = __noswap_vmulx_f32(__rev0_710, __noswap_splat_laneq_f32(__rev1_710, __p2_710)); \ __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 1, 0); \ __ret_710; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vnegq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = -__p0; return __ret; } #else __ai float64x2_t vnegq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vnegq_s64(int64x2_t __p0) { int64x2_t __ret; __ret = -__p0; return __ret; } #else __ai int64x2_t vnegq_s64(int64x2_t __p0) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vneg_f64(float64x1_t __p0) { float64x1_t __ret; __ret = -__p0; return __ret; } __ai int64x1_t vneg_s64(int64x1_t __p0) { int64x1_t __ret; __ret = -__p0; return __ret; } __ai int64_t vnegd_s64(int64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, 
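/* vnegq_f64, vnegq_s64, vneg_f64, vneg_s64 and vnegd_s64 above negate each
 * element (vnegd_s64 is the scalar form).  vpaddq_* performs pairwise addition
 * across its two operands: adjacent element pairs of the first operand are
 * summed into the low half of the result and adjacent pairs of the second
 * operand into the high half.  Illustrative sketch only:
 *
 *   uint32x4_t a, b;
 *   uint32x4_t r = vpaddq_u32(a, b);
 *   // r = { a[0]+a[1], a[2]+a[3], b[0]+b[1], b[2]+b[3] }
 */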
uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { 
float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64_t vpaddd_u64(uint64x2_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0); return __ret; } #else __ai uint64_t vpaddd_u64(uint64x2_t __p0) { uint64_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64_t vpaddd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0); return __ret; } #else __ai float64_t vpaddd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ __ai int64_t vpaddd_s64(int64x2_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0); return __ret; } #else __ai int64_t vpaddd_s64(int64x2_t __p0) { int64_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vpadds_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpadds_f32(__p0); return __ret; } #else __ai float32_t vpadds_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t 
__p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64_t vpmaxqd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0); return __ret; } #else __ai float64_t vpmaxqd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vpmaxs_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0); return __ret; } #else __ai float32_t vpmaxs_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0); return __ret; } #else __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vpmaxnms_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0); return __ret; } #else __ai float32_t vpmaxnms_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vpminq_s16(int16x8_t __p0, 
int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64_t vpminqd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0); return __ret; } #else __ai float64_t vpminqd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vpmins_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpmins_f32(__p0); return __ret; } #else __ai float32_t vpmins_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64_t vpminnmqd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0); return __ret; } #else __ai float64_t vpminnmqd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32_t vpminnms_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0); return 
__ret; } #else __ai float32_t vpminnms_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqabsq_s64(int64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vqabsq_s64(int64x2_t __p0) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vqabs_s64(int64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); return __ret; } __ai int8_t vqabsb_s8(int8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); return __ret; } __ai int32_t vqabss_s32(int32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); return __ret; } __ai int64_t vqabsd_s64(int64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); return __ret; } __ai int16_t vqabsh_s16(int16_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); return __ret; } __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); return __ret; } __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); return __ret; } __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); return __ret; } __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); return __ret; } __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); return __ret; } __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); return __ret; } __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); return __ret; } __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); return __ret; } __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); return __ret; } __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t 
vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlal_high_lane_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \ int64x2_t __ret_711; \ int64x2_t __s0_711 = __p0_711; \ int32x4_t __s1_711 = __p1_711; \ int32x2_t __s2_711 = __p2_711; \ __ret_711 = vqdmlal_s32(__s0_711, vget_high_s32(__s1_711), splat_lane_s32(__s2_711, __p3_711)); \ __ret_711; \ }) #else #define vqdmlal_high_lane_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \ int64x2_t __ret_712; \ int64x2_t __s0_712 = __p0_712; \ int32x4_t __s1_712 = __p1_712; \ int32x2_t __s2_712 = __p2_712; \ int64x2_t __rev0_712; __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \ int32x4_t __rev1_712; __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \ int32x2_t __rev2_712; __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 1, 0); \ __ret_712 = __noswap_vqdmlal_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_lane_s32(__rev2_712, __p3_712)); \ __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \ __ret_712; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlal_high_lane_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \ int32x4_t __ret_713; \ int32x4_t __s0_713 = __p0_713; \ int16x8_t __s1_713 = __p1_713; \ int16x4_t __s2_713 = __p2_713; \ __ret_713 = vqdmlal_s16(__s0_713, vget_high_s16(__s1_713), splat_lane_s16(__s2_713, __p3_713)); \ __ret_713; \ }) #else #define vqdmlal_high_lane_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \ int32x4_t __ret_714; \ int32x4_t __s0_714 = __p0_714; \ int16x8_t __s1_714 = __p1_714; \ int16x4_t __s2_714 = __p2_714; \ int32x4_t __rev0_714; __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \ int16x8_t __rev1_714; __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_714; __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 3, 2, 1, 0); \ __ret_714 = __noswap_vqdmlal_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_lane_s16(__rev2_714, __p3_714)); \ __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \ __ret_714; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlal_high_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \ int64x2_t __ret_715; \ int64x2_t __s0_715 = __p0_715; \ int32x4_t __s1_715 = __p1_715; \ int32x4_t __s2_715 = __p2_715; \ __ret_715 = vqdmlal_s32(__s0_715, vget_high_s32(__s1_715), splat_laneq_s32(__s2_715, __p3_715)); \ __ret_715; \ }) #else #define vqdmlal_high_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \ int64x2_t __ret_716; \ int64x2_t __s0_716 = __p0_716; \ int32x4_t __s1_716 = __p1_716; \ int32x4_t __s2_716 = __p2_716; \ int64x2_t __rev0_716; __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \ int32x4_t __rev1_716; __rev1_716 = 
__builtin_shufflevector(__s1_716, __s1_716, 3, 2, 1, 0); \ int32x4_t __rev2_716; __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \ __ret_716 = __noswap_vqdmlal_s32(__rev0_716, __noswap_vget_high_s32(__rev1_716), __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \ __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \ __ret_716; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlal_high_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \ int32x4_t __ret_717; \ int32x4_t __s0_717 = __p0_717; \ int16x8_t __s1_717 = __p1_717; \ int16x8_t __s2_717 = __p2_717; \ __ret_717 = vqdmlal_s16(__s0_717, vget_high_s16(__s1_717), splat_laneq_s16(__s2_717, __p3_717)); \ __ret_717; \ }) #else #define vqdmlal_high_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \ int32x4_t __ret_718; \ int32x4_t __s0_718 = __p0_718; \ int16x8_t __s1_718 = __p1_718; \ int16x8_t __s2_718 = __p2_718; \ int32x4_t __rev0_718; __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \ int16x8_t __rev1_718; __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_718; __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_718 = __noswap_vqdmlal_s16(__rev0_718, __noswap_vget_high_s16(__rev1_718), __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \ __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \ __ret_718; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int32_t __s1 = __p1; \ int32x2_t __s2 = __p2; \ __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \ __ret; \ }) #else #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int32_t __s1 = __p1; \ int32x2_t __s2 = __p2; \ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ int16_t __s1 = __p1; \ int16x4_t __s2 = __p2; \ __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, 
__s1, __s2, __p3); \ __ret; \ }) #else #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ int16_t __s1 = __p1; \ int16x4_t __s2 = __p2; \ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int32_t __s1 = __p1; \ int32x4_t __s2 = __p2; \ __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \ __ret; \ }) #else #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int32_t __s1 = __p1; \ int32x4_t __s2 = __p2; \ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ int16_t __s1 = __p1; \ int16x8_t __s2 = __p2; \ __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \ __ret; \ }) #else #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ int16_t __s1 = __p1; \ int16x8_t __s2 = __p2; \ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlal_laneq_s32(__p0_719, __p1_719, __p2_719, __p3_719) __extension__ ({ \ int64x2_t __ret_719; \ int64x2_t __s0_719 = __p0_719; \ int32x2_t __s1_719 = __p1_719; \ int32x4_t __s2_719 = __p2_719; \ __ret_719 = vqdmlal_s32(__s0_719, __s1_719, splat_laneq_s32(__s2_719, __p3_719)); \ __ret_719; \ }) #else #define vqdmlal_laneq_s32(__p0_720, __p1_720, __p2_720, __p3_720) __extension__ ({ \ int64x2_t __ret_720; \ int64x2_t __s0_720 = __p0_720; \ int32x2_t __s1_720 = __p1_720; \ int32x4_t __s2_720 = __p2_720; \ int64x2_t __rev0_720; __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 1, 0); \ int32x2_t __rev1_720; __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \ int32x4_t __rev2_720; __rev2_720 = __builtin_shufflevector(__s2_720, __s2_720, 3, 2, 1, 0); \ __ret_720 = __noswap_vqdmlal_s32(__rev0_720, __rev1_720, __noswap_splat_laneq_s32(__rev2_720, __p3_720)); \ __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \ __ret_720; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlal_laneq_s16(__p0_721, __p1_721, __p2_721, __p3_721) __extension__ ({ \ int32x4_t __ret_721; \ int32x4_t __s0_721 = __p0_721; \ int16x4_t __s1_721 = __p1_721; \ int16x8_t __s2_721 = __p2_721; \ __ret_721 = vqdmlal_s16(__s0_721, __s1_721, splat_laneq_s16(__s2_721, __p3_721)); \ __ret_721; \ }) #else #define vqdmlal_laneq_s16(__p0_722, __p1_722, __p2_722, __p3_722) __extension__ ({ \ int32x4_t __ret_722; \ int32x4_t __s0_722 = __p0_722; \ int16x4_t __s1_722 = __p1_722; \ int16x8_t __s2_722 = __p2_722; \ int32x4_t __rev0_722; __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 3, 2, 1, 0); \ int16x4_t __rev1_722; __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \ int16x8_t __rev2_722; __rev2_722 = __builtin_shufflevector(__s2_722, __s2_722, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_722 = __noswap_vqdmlal_s16(__rev0_722, __rev1_722, 
__noswap_splat_laneq_s16(__rev2_722, __p3_722)); \ __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \ __ret_722; \ }) #endif __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); return __ret; } __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsl_high_lane_s32(__p0_723, __p1_723, __p2_723, __p3_723) __extension__ ({ \ int64x2_t __ret_723; \ int64x2_t __s0_723 = __p0_723; \ int32x4_t __s1_723 = __p1_723; \ int32x2_t __s2_723 = __p2_723; \ __ret_723 = vqdmlsl_s32(__s0_723, vget_high_s32(__s1_723), splat_lane_s32(__s2_723, __p3_723)); \ __ret_723; \ }) #else #define vqdmlsl_high_lane_s32(__p0_724, __p1_724, __p2_724, __p3_724) __extension__ ({ \ int64x2_t __ret_724; \ int64x2_t __s0_724 = __p0_724; \ int32x4_t __s1_724 = __p1_724; \ int32x2_t __s2_724 = __p2_724; \ int64x2_t __rev0_724; __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 1, 0); \ int32x4_t __rev1_724; __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \ int32x2_t __rev2_724; __rev2_724 = __builtin_shufflevector(__s2_724, __s2_724, 1, 0); \ __ret_724 = __noswap_vqdmlsl_s32(__rev0_724, __noswap_vget_high_s32(__rev1_724), __noswap_splat_lane_s32(__rev2_724, __p3_724)); \ __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \ __ret_724; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsl_high_lane_s16(__p0_725, __p1_725, __p2_725, __p3_725) __extension__ ({ \ int32x4_t __ret_725; \ int32x4_t __s0_725 = __p0_725; \ int16x8_t __s1_725 = __p1_725; \ int16x4_t __s2_725 = __p2_725; \ __ret_725 = vqdmlsl_s16(__s0_725, vget_high_s16(__s1_725), splat_lane_s16(__s2_725, __p3_725)); \ __ret_725; \ }) #else #define vqdmlsl_high_lane_s16(__p0_726, __p1_726, __p2_726, __p3_726) __extension__ ({ \ int32x4_t __ret_726; \ int32x4_t __s0_726 = __p0_726; \ int16x8_t __s1_726 = __p1_726; \ int16x4_t __s2_726 
= __p2_726; \ int32x4_t __rev0_726; __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 3, 2, 1, 0); \ int16x8_t __rev1_726; __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev2_726; __rev2_726 = __builtin_shufflevector(__s2_726, __s2_726, 3, 2, 1, 0); \ __ret_726 = __noswap_vqdmlsl_s16(__rev0_726, __noswap_vget_high_s16(__rev1_726), __noswap_splat_lane_s16(__rev2_726, __p3_726)); \ __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \ __ret_726; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsl_high_laneq_s32(__p0_727, __p1_727, __p2_727, __p3_727) __extension__ ({ \ int64x2_t __ret_727; \ int64x2_t __s0_727 = __p0_727; \ int32x4_t __s1_727 = __p1_727; \ int32x4_t __s2_727 = __p2_727; \ __ret_727 = vqdmlsl_s32(__s0_727, vget_high_s32(__s1_727), splat_laneq_s32(__s2_727, __p3_727)); \ __ret_727; \ }) #else #define vqdmlsl_high_laneq_s32(__p0_728, __p1_728, __p2_728, __p3_728) __extension__ ({ \ int64x2_t __ret_728; \ int64x2_t __s0_728 = __p0_728; \ int32x4_t __s1_728 = __p1_728; \ int32x4_t __s2_728 = __p2_728; \ int64x2_t __rev0_728; __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 1, 0); \ int32x4_t __rev1_728; __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 3, 2, 1, 0); \ int32x4_t __rev2_728; __rev2_728 = __builtin_shufflevector(__s2_728, __s2_728, 3, 2, 1, 0); \ __ret_728 = __noswap_vqdmlsl_s32(__rev0_728, __noswap_vget_high_s32(__rev1_728), __noswap_splat_laneq_s32(__rev2_728, __p3_728)); \ __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \ __ret_728; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsl_high_laneq_s16(__p0_729, __p1_729, __p2_729, __p3_729) __extension__ ({ \ int32x4_t __ret_729; \ int32x4_t __s0_729 = __p0_729; \ int16x8_t __s1_729 = __p1_729; \ int16x8_t __s2_729 = __p2_729; \ __ret_729 = vqdmlsl_s16(__s0_729, vget_high_s16(__s1_729), splat_laneq_s16(__s2_729, __p3_729)); \ __ret_729; \ }) #else #define vqdmlsl_high_laneq_s16(__p0_730, __p1_730, __p2_730, __p3_730) __extension__ ({ \ int32x4_t __ret_730; \ int32x4_t __s0_730 = __p0_730; \ int16x8_t __s1_730 = __p1_730; \ int16x8_t __s2_730 = __p2_730; \ int32x4_t __rev0_730; __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \ int16x8_t __rev1_730; __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev2_730; __rev2_730 = __builtin_shufflevector(__s2_730, __s2_730, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_730 = __noswap_vqdmlsl_s16(__rev0_730, __noswap_vget_high_s16(__rev1_730), __noswap_splat_laneq_s16(__rev2_730, __p3_730)); \ __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \ __ret_730; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t 
__p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int32_t __s1 = __p1; \ int32x2_t __s2 = __p2; \ __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \ __ret; \ }) #else #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int32_t __s1 = __p1; \ int32x2_t __s2 = __p2; \ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ int16_t __s1 = __p1; \ int16x4_t __s2 = __p2; \ __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \ __ret; \ }) #else #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ int16_t __s1 = __p1; \ int16x4_t __s2 = __p2; \ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int32_t __s1 = __p1; \ int32x4_t __s2 = __p2; \ __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \ __ret; \ }) #else #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int32_t __s1 = __p1; \ int32x4_t __s2 = __p2; \ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ int16_t __s1 = __p1; \ int16x8_t __s2 = __p2; \ __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \ __ret; \ }) #else #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ int16_t __s1 = __p1; \ int16x8_t __s2 = __p2; \ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsl_laneq_s32(__p0_731, __p1_731, __p2_731, __p3_731) __extension__ ({ \ int64x2_t __ret_731; \ int64x2_t __s0_731 = __p0_731; \ int32x2_t __s1_731 = __p1_731; \ int32x4_t __s2_731 = __p2_731; \ __ret_731 = vqdmlsl_s32(__s0_731, __s1_731, splat_laneq_s32(__s2_731, __p3_731)); \ __ret_731; \ }) #else #define vqdmlsl_laneq_s32(__p0_732, __p1_732, __p2_732, __p3_732) __extension__ ({ \ int64x2_t __ret_732; \ int64x2_t __s0_732 = __p0_732; \ int32x2_t __s1_732 = __p1_732; \ int32x4_t __s2_732 = __p2_732; \ int64x2_t __rev0_732; __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 1, 0); \ int32x2_t __rev1_732; __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 
1, 0); \ int32x4_t __rev2_732; __rev2_732 = __builtin_shufflevector(__s2_732, __s2_732, 3, 2, 1, 0); \ __ret_732 = __noswap_vqdmlsl_s32(__rev0_732, __rev1_732, __noswap_splat_laneq_s32(__rev2_732, __p3_732)); \ __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \ __ret_732; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmlsl_laneq_s16(__p0_733, __p1_733, __p2_733, __p3_733) __extension__ ({ \ int32x4_t __ret_733; \ int32x4_t __s0_733 = __p0_733; \ int16x4_t __s1_733 = __p1_733; \ int16x8_t __s2_733 = __p2_733; \ __ret_733 = vqdmlsl_s16(__s0_733, __s1_733, splat_laneq_s16(__s2_733, __p3_733)); \ __ret_733; \ }) #else #define vqdmlsl_laneq_s16(__p0_734, __p1_734, __p2_734, __p3_734) __extension__ ({ \ int32x4_t __ret_734; \ int32x4_t __s0_734 = __p0_734; \ int16x4_t __s1_734 = __p1_734; \ int16x8_t __s2_734 = __p2_734; \ int32x4_t __rev0_734; __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 3, 2, 1, 0); \ int16x4_t __rev1_734; __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \ int16x8_t __rev2_734; __rev2_734 = __builtin_shufflevector(__s2_734, __s2_734, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_734 = __noswap_vqdmlsl_s16(__rev0_734, __rev1_734, __noswap_splat_laneq_s16(__rev2_734, __p3_734)); \ __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \ __ret_734; \ }) #endif __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); return __ret; } __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ #define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhs_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \ int32_t __ret_735; \ int32_t __s0_735 = __p0_735; \ int32x2_t __s1_735 = __p1_735; \ __ret_735 = vqdmulhs_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \ __ret_735; \ }) #else #define vqdmulhs_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \ int32_t __ret_736; \ int32_t __s0_736 = __p0_736; \ int32x2_t __s1_736 = __p1_736; \ int32x2_t __rev1_736; __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \ __ret_736 = vqdmulhs_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \ __ret_736; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \ int16_t __ret_737; \ int16_t __s0_737 = __p0_737; \ int16x4_t __s1_737 = __p1_737; \ __ret_737 = vqdmulhh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \ __ret_737; \ }) #else #define vqdmulhh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \ int16_t __ret_738; \ int16_t __s0_738 = __p0_738; \ int16x4_t __s1_738 = __p1_738; \ int16x4_t __rev1_738; __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \ __ret_738 = vqdmulhh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \ __ret_738; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhs_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \ int32_t __ret_739; \ int32_t __s0_739 = __p0_739; \ int32x4_t __s1_739 = __p1_739; \ __ret_739 = vqdmulhs_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \ __ret_739; \ }) #else #define vqdmulhs_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \ int32_t __ret_740; \ int32_t __s0_740 = __p0_740; \ int32x4_t __s1_740 = __p1_740; \ int32x4_t __rev1_740; __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \ __ret_740 = vqdmulhs_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \ __ret_740; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \ int16_t __ret_741; \ int16_t __s0_741 = __p0_741; \ int16x8_t __s1_741 = __p1_741; \ __ret_741 = vqdmulhh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \ __ret_741; \ }) #else #define vqdmulhh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \ int16_t __ret_742; \ int16_t __s0_742 = __p0_742; \ int16x8_t __s1_742 = __p1_742; \ int16x8_t __rev1_742; __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_742 = vqdmulhh_s16(__s0_742, 
__noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \ __ret_742; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ __ret; \ }) #else #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ __ret; \ }) #else #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); return __ret; } __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return 
__ret; } #else __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmull_high_lane_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \ int64x2_t __ret_743; \ int32x4_t __s0_743 = __p0_743; \ int32x2_t __s1_743 = __p1_743; \ __ret_743 = vqdmull_s32(vget_high_s32(__s0_743), splat_lane_s32(__s1_743, __p2_743)); \ __ret_743; \ }) #else #define vqdmull_high_lane_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \ int64x2_t __ret_744; \ int32x4_t __s0_744 = __p0_744; \ int32x2_t __s1_744 = __p1_744; \ int32x4_t __rev0_744; __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \ int32x2_t __rev1_744; __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 1, 0); \ __ret_744 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_744), __noswap_splat_lane_s32(__rev1_744, __p2_744)); \ __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \ __ret_744; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmull_high_lane_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \ int32x4_t __ret_745; \ int16x8_t __s0_745 = __p0_745; \ int16x4_t __s1_745 = __p1_745; \ __ret_745 = vqdmull_s16(vget_high_s16(__s0_745), splat_lane_s16(__s1_745, __p2_745)); \ __ret_745; \ }) #else #define vqdmull_high_lane_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \ int32x4_t __ret_746; \ int16x8_t __s0_746 = __p0_746; \ int16x4_t __s1_746 = __p1_746; \ int16x8_t __rev0_746; __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev1_746; __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 3, 2, 1, 0); \ __ret_746 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_746), __noswap_splat_lane_s16(__rev1_746, __p2_746)); \ __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \ __ret_746; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmull_high_laneq_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \ int64x2_t __ret_747; \ int32x4_t __s0_747 = __p0_747; \ int32x4_t __s1_747 = __p1_747; \ __ret_747 = vqdmull_s32(vget_high_s32(__s0_747), splat_laneq_s32(__s1_747, __p2_747)); \ __ret_747; \ }) #else #define vqdmull_high_laneq_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \ int64x2_t __ret_748; \ int32x4_t __s0_748 = __p0_748; \ int32x4_t __s1_748 = __p1_748; \ int32x4_t __rev0_748; __rev0_748 = __builtin_shufflevector(__s0_748, __s0_748, 3, 2, 1, 0); \ int32x4_t __rev1_748; __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 3, 2, 1, 0); \ __ret_748 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_748), 
__noswap_splat_laneq_s32(__rev1_748, __p2_748)); \ __ret_748 = __builtin_shufflevector(__ret_748, __ret_748, 1, 0); \ __ret_748; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmull_high_laneq_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \ int32x4_t __ret_749; \ int16x8_t __s0_749 = __p0_749; \ int16x8_t __s1_749 = __p1_749; \ __ret_749 = vqdmull_s16(vget_high_s16(__s0_749), splat_laneq_s16(__s1_749, __p2_749)); \ __ret_749; \ }) #else #define vqdmull_high_laneq_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \ int32x4_t __ret_750; \ int16x8_t __s0_750 = __p0_750; \ int16x8_t __s1_750 = __p1_750; \ int16x8_t __rev0_750; __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_750; __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_750 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_750), __noswap_splat_laneq_s16(__rev1_750, __p2_750)); \ __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 3, 2, 1, 0); \ __ret_750; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); return __ret; } #else __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); return __ret; } #else __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulls_lane_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \ int64_t __ret_751; \ int32_t __s0_751 = __p0_751; \ int32x2_t __s1_751 = __p1_751; \ __ret_751 = vqdmulls_s32(__s0_751, vget_lane_s32(__s1_751, __p2_751)); \ __ret_751; \ }) #else #define vqdmulls_lane_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \ int64_t __ret_752; \ int32_t __s0_752 = __p0_752; \ int32x2_t __s1_752 = __p1_752; \ int32x2_t __rev1_752; __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 1, 0); \ __ret_752 = vqdmulls_s32(__s0_752, __noswap_vget_lane_s32(__rev1_752, __p2_752)); \ __ret_752; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmullh_lane_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \ int32_t __ret_753; \ int16_t __s0_753 = __p0_753; \ int16x4_t __s1_753 = __p1_753; \ __ret_753 = vqdmullh_s16(__s0_753, vget_lane_s16(__s1_753, __p2_753)); \ __ret_753; \ }) #else #define vqdmullh_lane_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \ int32_t __ret_754; \ int16_t __s0_754 = __p0_754; \ int16x4_t __s1_754 = __p1_754; \ int16x4_t __rev1_754; __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 3, 2, 1, 0); \ __ret_754 = vqdmullh_s16(__s0_754, __noswap_vget_lane_s16(__rev1_754, __p2_754)); \ __ret_754; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmulls_laneq_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \ int64_t __ret_755; \ int32_t __s0_755 = __p0_755; \ int32x4_t __s1_755 = __p1_755; \ __ret_755 = vqdmulls_s32(__s0_755, vgetq_lane_s32(__s1_755, 
__p2_755)); \ __ret_755; \ }) #else #define vqdmulls_laneq_s32(__p0_756, __p1_756, __p2_756) __extension__ ({ \ int64_t __ret_756; \ int32_t __s0_756 = __p0_756; \ int32x4_t __s1_756 = __p1_756; \ int32x4_t __rev1_756; __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \ __ret_756 = vqdmulls_s32(__s0_756, __noswap_vgetq_lane_s32(__rev1_756, __p2_756)); \ __ret_756; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmullh_laneq_s16(__p0_757, __p1_757, __p2_757) __extension__ ({ \ int32_t __ret_757; \ int16_t __s0_757 = __p0_757; \ int16x8_t __s1_757 = __p1_757; \ __ret_757 = vqdmullh_s16(__s0_757, vgetq_lane_s16(__s1_757, __p2_757)); \ __ret_757; \ }) #else #define vqdmullh_laneq_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \ int32_t __ret_758; \ int16_t __s0_758 = __p0_758; \ int16x8_t __s1_758 = __p1_758; \ int16x8_t __rev1_758; __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_758 = vqdmullh_s16(__s0_758, __noswap_vgetq_lane_s16(__rev1_758, __p2_758)); \ __ret_758; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmull_laneq_s32(__p0_759, __p1_759, __p2_759) __extension__ ({ \ int64x2_t __ret_759; \ int32x2_t __s0_759 = __p0_759; \ int32x4_t __s1_759 = __p1_759; \ __ret_759 = vqdmull_s32(__s0_759, splat_laneq_s32(__s1_759, __p2_759)); \ __ret_759; \ }) #else #define vqdmull_laneq_s32(__p0_760, __p1_760, __p2_760) __extension__ ({ \ int64x2_t __ret_760; \ int32x2_t __s0_760 = __p0_760; \ int32x4_t __s1_760 = __p1_760; \ int32x2_t __rev0_760; __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 1, 0); \ int32x4_t __rev1_760; __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \ __ret_760 = __noswap_vqdmull_s32(__rev0_760, __noswap_splat_laneq_s32(__rev1_760, __p2_760)); \ __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 1, 0); \ __ret_760; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqdmull_laneq_s16(__p0_761, __p1_761, __p2_761) __extension__ ({ \ int32x4_t __ret_761; \ int16x4_t __s0_761 = __p0_761; \ int16x8_t __s1_761 = __p1_761; \ __ret_761 = vqdmull_s16(__s0_761, splat_laneq_s16(__s1_761, __p2_761)); \ __ret_761; \ }) #else #define vqdmull_laneq_s16(__p0_762, __p1_762, __p2_762) __extension__ ({ \ int32x4_t __ret_762; \ int16x4_t __s0_762 = __p0_762; \ int16x8_t __s1_762 = __p1_762; \ int16x4_t __rev0_762; __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \ int16x8_t __rev1_762; __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_762 = __noswap_vqdmull_s16(__rev0_762, __noswap_splat_laneq_s16(__rev1_762, __p2_762)); \ __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 3, 2, 1, 0); \ __ret_762; \ }) #endif __ai int16_t vqmovns_s32(int32_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); return __ret; } __ai int32_t vqmovnd_s64(int64_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); return __ret; } __ai int8_t vqmovnh_s16(int16_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); return __ret; } __ai uint16_t vqmovns_u32(uint32_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); return __ret; } __ai uint32_t vqmovnd_u64(uint64_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); return __ret; } __ai uint8_t vqmovnh_u16(uint16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) 
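/*
 * Illustrative usage sketch for the vqmovn*_high forms defined here (the
 * helper name narrow_accumulators_u16 is hypothetical; an AArch64 target is
 * assumed).  The _high variants append a second saturating-narrow result to
 * an existing narrow vector, avoiding a separate vcombine step:
 *
 *   static uint16x8_t narrow_accumulators_u16(uint32x4_t lo, uint32x4_t hi)
 *   {
 *       uint16x4_t low_half = vqmovn_u32(lo);   // saturate lo to 16 bits
 *       return vqmovn_high_u32(low_half, hi);   // append saturated hi
 *   }
 */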
{ uint16x8_t __ret; __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); return __ret; } #else __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); return __ret; } #else __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); return __ret; } #else __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); return __ret; } #else __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { int32x4_t __ret; __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); return __ret; } #else __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); return __ret; } #else __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif __ai uint16_t vqmovuns_s32(int32_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0); return __ret; } __ai 
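/*
 * Illustrative usage sketch for the vqmovun (signed-to-unsigned saturating
 * narrow) family (the helper name clamp_s16_to_u8 is hypothetical).  These
 * clamp negative inputs to 0 and inputs above the unsigned maximum to that
 * maximum before narrowing:
 *
 *   static uint8x8_t clamp_s16_to_u8(int16x8_t pixels)
 *   {
 *       return vqmovun_s16(pixels);   // -5 -> 0, 300 -> 255, 42 -> 42
 *   }
 */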
uint32_t vqmovund_s64(int64_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0); return __ret; } __ai uint8_t vqmovunh_s16(int16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { uint16x8_t __ret; __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); return __ret; } #else __ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { uint32x4_t __ret; __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); return __ret; } #else __ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { uint8x16_t __ret; __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); return __ret; } #else __ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vqnegq_s64(int64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); return __ret; } #else __ai int64x2_t vqnegq_s64(int64x2_t __p0) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vqneg_s64(int64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); return __ret; } __ai int8_t vqnegb_s8(int8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); return __ret; } __ai int32_t vqnegs_s32(int32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); return __ret; } __ai int64_t vqnegd_s64(int64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); return __ret; } __ai int16_t vqnegh_s16(int16_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); return __ret; } __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); return __ret; } __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ #define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ 
({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else #define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else #define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulhs_lane_s32(__p0_763, __p1_763, __p2_763) __extension__ ({ \ int32_t __ret_763; \ int32_t __s0_763 = __p0_763; \ int32x2_t __s1_763 = __p1_763; \ __ret_763 = vqrdmulhs_s32(__s0_763, vget_lane_s32(__s1_763, __p2_763)); \ __ret_763; \ }) #else #define vqrdmulhs_lane_s32(__p0_764, __p1_764, __p2_764) __extension__ ({ \ int32_t __ret_764; \ int32_t __s0_764 = __p0_764; \ int32x2_t __s1_764 = __p1_764; \ int32x2_t __rev1_764; __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \ __ret_764 = vqrdmulhs_s32(__s0_764, __noswap_vget_lane_s32(__rev1_764, __p2_764)); \ __ret_764; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define 
vqrdmulhh_lane_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \ int16_t __ret_765; \ int16_t __s0_765 = __p0_765; \ int16x4_t __s1_765 = __p1_765; \ __ret_765 = vqrdmulhh_s16(__s0_765, vget_lane_s16(__s1_765, __p2_765)); \ __ret_765; \ }) #else #define vqrdmulhh_lane_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \ int16_t __ret_766; \ int16_t __s0_766 = __p0_766; \ int16x4_t __s1_766 = __p1_766; \ int16x4_t __rev1_766; __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \ __ret_766 = vqrdmulhh_s16(__s0_766, __noswap_vget_lane_s16(__rev1_766, __p2_766)); \ __ret_766; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulhs_laneq_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \ int32_t __ret_767; \ int32_t __s0_767 = __p0_767; \ int32x4_t __s1_767 = __p1_767; \ __ret_767 = vqrdmulhs_s32(__s0_767, vgetq_lane_s32(__s1_767, __p2_767)); \ __ret_767; \ }) #else #define vqrdmulhs_laneq_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \ int32_t __ret_768; \ int32_t __s0_768 = __p0_768; \ int32x4_t __s1_768 = __p1_768; \ int32x4_t __rev1_768; __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \ __ret_768 = vqrdmulhs_s32(__s0_768, __noswap_vgetq_lane_s32(__rev1_768, __p2_768)); \ __ret_768; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulhh_laneq_s16(__p0_769, __p1_769, __p2_769) __extension__ ({ \ int16_t __ret_769; \ int16_t __s0_769 = __p0_769; \ int16x8_t __s1_769 = __p1_769; \ __ret_769 = vqrdmulhh_s16(__s0_769, vgetq_lane_s16(__s1_769, __p2_769)); \ __ret_769; \ }) #else #define vqrdmulhh_laneq_s16(__p0_770, __p1_770, __p2_770) __extension__ ({ \ int16_t __ret_770; \ int16_t __s0_770 = __p0_770; \ int16x8_t __s1_770 = __p1_770; \ int16x8_t __rev1_770; __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_770 = vqrdmulhh_s16(__s0_770, __noswap_vgetq_lane_s16(__rev1_770, __p2_770)); \ __ret_770; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ 
int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ __ret; \ }) #else #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ __ret; \ }) #else #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif __ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); return __ret; } __ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); return __ret; } __ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); return __ret; } __ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); return __ret; } __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); return __ret; } __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); return __ret; } __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); return __ret; } __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ #define vqrshrn_high_n_u32(__p0_771, __p1_771, __p2_771) __extension__ ({ \ uint16x8_t __ret_771; \ uint16x4_t __s0_771 = __p0_771; \ uint32x4_t __s1_771 = __p1_771; \ __ret_771 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_771), (uint16x4_t)(vqrshrn_n_u32(__s1_771, __p2_771)))); \ __ret_771; \ }) #else #define vqrshrn_high_n_u32(__p0_772, __p1_772, __p2_772) __extension__ ({ \ uint16x8_t __ret_772; \ uint16x4_t __s0_772 = __p0_772; \ uint32x4_t __s1_772 = __p1_772; \ uint16x4_t __rev0_772; __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 3, 2, 1, 0); \ uint32x4_t __rev1_772; __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 3, 2, 1, 0); \ __ret_772 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_772), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_772, __p2_772)))); \ __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_772; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_high_n_u64(__p0_773, 
__p1_773, __p2_773) __extension__ ({ \ uint32x4_t __ret_773; \ uint32x2_t __s0_773 = __p0_773; \ uint64x2_t __s1_773 = __p1_773; \ __ret_773 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_773), (uint32x2_t)(vqrshrn_n_u64(__s1_773, __p2_773)))); \ __ret_773; \ }) #else #define vqrshrn_high_n_u64(__p0_774, __p1_774, __p2_774) __extension__ ({ \ uint32x4_t __ret_774; \ uint32x2_t __s0_774 = __p0_774; \ uint64x2_t __s1_774 = __p1_774; \ uint32x2_t __rev0_774; __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 1, 0); \ uint64x2_t __rev1_774; __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 1, 0); \ __ret_774 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_774), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_774, __p2_774)))); \ __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \ __ret_774; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_high_n_u16(__p0_775, __p1_775, __p2_775) __extension__ ({ \ uint8x16_t __ret_775; \ uint8x8_t __s0_775 = __p0_775; \ uint16x8_t __s1_775 = __p1_775; \ __ret_775 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_775), (uint8x8_t)(vqrshrn_n_u16(__s1_775, __p2_775)))); \ __ret_775; \ }) #else #define vqrshrn_high_n_u16(__p0_776, __p1_776, __p2_776) __extension__ ({ \ uint8x16_t __ret_776; \ uint8x8_t __s0_776 = __p0_776; \ uint16x8_t __s1_776 = __p1_776; \ uint8x8_t __rev0_776; __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_776; __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_776 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_776), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_776, __p2_776)))); \ __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_776; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_high_n_s32(__p0_777, __p1_777, __p2_777) __extension__ ({ \ int16x8_t __ret_777; \ int16x4_t __s0_777 = __p0_777; \ int32x4_t __s1_777 = __p1_777; \ __ret_777 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_777), (int16x4_t)(vqrshrn_n_s32(__s1_777, __p2_777)))); \ __ret_777; \ }) #else #define vqrshrn_high_n_s32(__p0_778, __p1_778, __p2_778) __extension__ ({ \ int16x8_t __ret_778; \ int16x4_t __s0_778 = __p0_778; \ int32x4_t __s1_778 = __p1_778; \ int16x4_t __rev0_778; __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \ int32x4_t __rev1_778; __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 3, 2, 1, 0); \ __ret_778 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_778), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_778, __p2_778)))); \ __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_778; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_high_n_s64(__p0_779, __p1_779, __p2_779) __extension__ ({ \ int32x4_t __ret_779; \ int32x2_t __s0_779 = __p0_779; \ int64x2_t __s1_779 = __p1_779; \ __ret_779 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_779), (int32x2_t)(vqrshrn_n_s64(__s1_779, __p2_779)))); \ __ret_779; \ }) #else #define vqrshrn_high_n_s64(__p0_780, __p1_780, __p2_780) __extension__ ({ \ int32x4_t __ret_780; \ int32x2_t __s0_780 = __p0_780; \ int64x2_t __s1_780 = __p1_780; \ int32x2_t __rev0_780; __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 1, 0); \ int64x2_t __rev1_780; __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 1, 0); \ __ret_780 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_780), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_780, __p2_780)))); \ __ret_780 = 
__builtin_shufflevector(__ret_780, __ret_780, 3, 2, 1, 0); \ __ret_780; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrn_high_n_s16(__p0_781, __p1_781, __p2_781) __extension__ ({ \ int8x16_t __ret_781; \ int8x8_t __s0_781 = __p0_781; \ int16x8_t __s1_781 = __p1_781; \ __ret_781 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_781), (int8x8_t)(vqrshrn_n_s16(__s1_781, __p2_781)))); \ __ret_781; \ }) #else #define vqrshrn_high_n_s16(__p0_782, __p1_782, __p2_782) __extension__ ({ \ int8x16_t __ret_782; \ int8x8_t __s0_782 = __p0_782; \ int16x8_t __s1_782 = __p1_782; \ int8x8_t __rev0_782; __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_782; __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_782 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_782), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_782, __p2_782)))); \ __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_782; \ }) #endif #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint32_t __s0 = __p0; \ __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ __ret; \ }) #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint64_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ __ret; \ }) #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint16_t __s0 = __p0; \ __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ __ret; \ }) #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int32_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ __ret; \ }) #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int64_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ __ret; \ }) #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int16_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vqrshrun_high_n_s32(__p0_783, __p1_783, __p2_783) __extension__ ({ \ int16x8_t __ret_783; \ int16x4_t __s0_783 = __p0_783; \ int32x4_t __s1_783 = __p1_783; \ __ret_783 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_783), (int16x4_t)(vqrshrun_n_s32(__s1_783, __p2_783)))); \ __ret_783; \ }) #else #define vqrshrun_high_n_s32(__p0_784, __p1_784, __p2_784) __extension__ ({ \ int16x8_t __ret_784; \ int16x4_t __s0_784 = __p0_784; \ int32x4_t __s1_784 = __p1_784; \ int16x4_t __rev0_784; __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 3, 2, 1, 0); \ int32x4_t __rev1_784; __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 3, 2, 1, 0); \ __ret_784 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_784), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_784, __p2_784)))); \ __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_784; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrun_high_n_s64(__p0_785, __p1_785, __p2_785) __extension__ ({ \ int32x4_t __ret_785; \ int32x2_t __s0_785 = __p0_785; \ int64x2_t __s1_785 = __p1_785; \ __ret_785 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_785), (int32x2_t)(vqrshrun_n_s64(__s1_785, __p2_785)))); \ __ret_785; \ }) #else #define vqrshrun_high_n_s64(__p0_786, __p1_786, __p2_786) __extension__ ({ \ int32x4_t __ret_786; \ int32x2_t __s0_786 = __p0_786; \ int64x2_t __s1_786 = __p1_786; \ int32x2_t __rev0_786; __rev0_786 = __builtin_shufflevector(__s0_786, 
__s0_786, 1, 0); \ int64x2_t __rev1_786; __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 1, 0); \ __ret_786 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_786), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_786, __p2_786)))); \ __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 3, 2, 1, 0); \ __ret_786; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqrshrun_high_n_s16(__p0_787, __p1_787, __p2_787) __extension__ ({ \ int8x16_t __ret_787; \ int8x8_t __s0_787 = __p0_787; \ int16x8_t __s1_787 = __p1_787; \ __ret_787 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_787), (int8x8_t)(vqrshrun_n_s16(__s1_787, __p2_787)))); \ __ret_787; \ }) #else #define vqrshrun_high_n_s16(__p0_788, __p1_788, __p2_788) __extension__ ({ \ int8x16_t __ret_788; \ int8x8_t __s0_788 = __p0_788; \ int16x8_t __s1_788 = __p1_788; \ int8x8_t __rev0_788; __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_788; __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_788 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_788), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_788, __p2_788)))); \ __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_788; \ }) #endif #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int32_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ __ret; \ }) #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int64_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ __ret; \ }) #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int16_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ __ret; \ }) __ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); return __ret; } __ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); return __ret; } __ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); return __ret; } __ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); return __ret; } __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); return __ret; } __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); return __ret; } __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); return __ret; } __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); return __ret; } #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint8_t __s0 = __p0; \ __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ __ret; \ }) #define vqshls_n_u32(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint32_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ __ret; \ }) #define vqshld_n_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ __ret; \ }) #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint16_t __s0 = __p0; \ __ret 
= (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ __ret; \ }) #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ __ret; \ }) #define vqshls_n_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ __ret; \ }) #define vqshld_n_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ __ret; \ }) #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ __ret; \ }) #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int8_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ __ret; \ }) #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ __ret; \ }) #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ __ret; \ }) #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int16_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vqshrn_high_n_u32(__p0_789, __p1_789, __p2_789) __extension__ ({ \ uint16x8_t __ret_789; \ uint16x4_t __s0_789 = __p0_789; \ uint32x4_t __s1_789 = __p1_789; \ __ret_789 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_789), (uint16x4_t)(vqshrn_n_u32(__s1_789, __p2_789)))); \ __ret_789; \ }) #else #define vqshrn_high_n_u32(__p0_790, __p1_790, __p2_790) __extension__ ({ \ uint16x8_t __ret_790; \ uint16x4_t __s0_790 = __p0_790; \ uint32x4_t __s1_790 = __p1_790; \ uint16x4_t __rev0_790; __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 3, 2, 1, 0); \ uint32x4_t __rev1_790; __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 3, 2, 1, 0); \ __ret_790 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_790), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_790, __p2_790)))); \ __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_790; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_high_n_u64(__p0_791, __p1_791, __p2_791) __extension__ ({ \ uint32x4_t __ret_791; \ uint32x2_t __s0_791 = __p0_791; \ uint64x2_t __s1_791 = __p1_791; \ __ret_791 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_791), (uint32x2_t)(vqshrn_n_u64(__s1_791, __p2_791)))); \ __ret_791; \ }) #else #define vqshrn_high_n_u64(__p0_792, __p1_792, __p2_792) __extension__ ({ \ uint32x4_t __ret_792; \ uint32x2_t __s0_792 = __p0_792; \ uint64x2_t __s1_792 = __p1_792; \ uint32x2_t __rev0_792; __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 1, 0); \ uint64x2_t __rev1_792; __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 1, 0); \ __ret_792 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_792), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_792, __p2_792)))); \ __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 3, 2, 1, 0); \ __ret_792; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_high_n_u16(__p0_793, __p1_793, __p2_793) __extension__ ({ \ uint8x16_t __ret_793; \ uint8x8_t __s0_793 = __p0_793; \ uint16x8_t __s1_793 = __p1_793; \ __ret_793 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_793), (uint8x8_t)(vqshrn_n_u16(__s1_793, __p2_793)))); \ __ret_793; \ }) #else #define 
vqshrn_high_n_u16(__p0_794, __p1_794, __p2_794) __extension__ ({ \ uint8x16_t __ret_794; \ uint8x8_t __s0_794 = __p0_794; \ uint16x8_t __s1_794 = __p1_794; \ uint8x8_t __rev0_794; __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_794; __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_794 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_794), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_794, __p2_794)))); \ __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_794; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_high_n_s32(__p0_795, __p1_795, __p2_795) __extension__ ({ \ int16x8_t __ret_795; \ int16x4_t __s0_795 = __p0_795; \ int32x4_t __s1_795 = __p1_795; \ __ret_795 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_795), (int16x4_t)(vqshrn_n_s32(__s1_795, __p2_795)))); \ __ret_795; \ }) #else #define vqshrn_high_n_s32(__p0_796, __p1_796, __p2_796) __extension__ ({ \ int16x8_t __ret_796; \ int16x4_t __s0_796 = __p0_796; \ int32x4_t __s1_796 = __p1_796; \ int16x4_t __rev0_796; __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 3, 2, 1, 0); \ int32x4_t __rev1_796; __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \ __ret_796 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_796), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_796, __p2_796)))); \ __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_796; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_high_n_s64(__p0_797, __p1_797, __p2_797) __extension__ ({ \ int32x4_t __ret_797; \ int32x2_t __s0_797 = __p0_797; \ int64x2_t __s1_797 = __p1_797; \ __ret_797 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_797), (int32x2_t)(vqshrn_n_s64(__s1_797, __p2_797)))); \ __ret_797; \ }) #else #define vqshrn_high_n_s64(__p0_798, __p1_798, __p2_798) __extension__ ({ \ int32x4_t __ret_798; \ int32x2_t __s0_798 = __p0_798; \ int64x2_t __s1_798 = __p1_798; \ int32x2_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 1, 0); \ int64x2_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 1, 0); \ __ret_798 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_798), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_798, __p2_798)))); \ __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 3, 2, 1, 0); \ __ret_798; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrn_high_n_s16(__p0_799, __p1_799, __p2_799) __extension__ ({ \ int8x16_t __ret_799; \ int8x8_t __s0_799 = __p0_799; \ int16x8_t __s1_799 = __p1_799; \ __ret_799 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_799), (int8x8_t)(vqshrn_n_s16(__s1_799, __p2_799)))); \ __ret_799; \ }) #else #define vqshrn_high_n_s16(__p0_800, __p1_800, __p2_800) __extension__ ({ \ int8x16_t __ret_800; \ int8x8_t __s0_800 = __p0_800; \ int16x8_t __s1_800 = __p1_800; \ int8x8_t __rev0_800; __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_800; __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_800 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_800), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_800, __p2_800)))); \ __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_800; \ }) #endif #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ uint16_t __ret; \ uint32_t __s0 = __p0; \ __ret = (uint16_t) 
__builtin_neon_vqshrns_n_u32(__s0, __p1); \ __ret; \ }) #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ uint32_t __ret; \ uint64_t __s0 = __p0; \ __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ __ret; \ }) #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ uint8_t __ret; \ uint16_t __s0 = __p0; \ __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ __ret; \ }) #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int32_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ __ret; \ }) #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int64_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ __ret; \ }) #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int16_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vqshrun_high_n_s32(__p0_801, __p1_801, __p2_801) __extension__ ({ \ int16x8_t __ret_801; \ int16x4_t __s0_801 = __p0_801; \ int32x4_t __s1_801 = __p1_801; \ __ret_801 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_801), (int16x4_t)(vqshrun_n_s32(__s1_801, __p2_801)))); \ __ret_801; \ }) #else #define vqshrun_high_n_s32(__p0_802, __p1_802, __p2_802) __extension__ ({ \ int16x8_t __ret_802; \ int16x4_t __s0_802 = __p0_802; \ int32x4_t __s1_802 = __p1_802; \ int16x4_t __rev0_802; __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \ int32x4_t __rev1_802; __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 3, 2, 1, 0); \ __ret_802 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_802), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_802, __p2_802)))); \ __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_802; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrun_high_n_s64(__p0_803, __p1_803, __p2_803) __extension__ ({ \ int32x4_t __ret_803; \ int32x2_t __s0_803 = __p0_803; \ int64x2_t __s1_803 = __p1_803; \ __ret_803 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_803), (int32x2_t)(vqshrun_n_s64(__s1_803, __p2_803)))); \ __ret_803; \ }) #else #define vqshrun_high_n_s64(__p0_804, __p1_804, __p2_804) __extension__ ({ \ int32x4_t __ret_804; \ int32x2_t __s0_804 = __p0_804; \ int64x2_t __s1_804 = __p1_804; \ int32x2_t __rev0_804; __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 1, 0); \ int64x2_t __rev1_804; __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 1, 0); \ __ret_804 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_804), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_804, __p2_804)))); \ __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 3, 2, 1, 0); \ __ret_804; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vqshrun_high_n_s16(__p0_805, __p1_805, __p2_805) __extension__ ({ \ int8x16_t __ret_805; \ int8x8_t __s0_805 = __p0_805; \ int16x8_t __s1_805 = __p1_805; \ __ret_805 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_805), (int8x8_t)(vqshrun_n_s16(__s1_805, __p2_805)))); \ __ret_805; \ }) #else #define vqshrun_high_n_s16(__p0_806, __p1_806, __p2_806) __extension__ ({ \ int8x16_t __ret_806; \ int8x8_t __s0_806 = __p0_806; \ int16x8_t __s1_806 = __p1_806; \ int8x8_t __rev0_806; __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_806; __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_806 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_806), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_806, __p2_806)))); \ __ret_806 = 
__builtin_shufflevector(__ret_806, __ret_806, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_806; \ }) #endif #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ int16_t __ret; \ int32_t __s0 = __p0; \ __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ __ret; \ }) #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ int32_t __ret; \ int64_t __s0 = __p0; \ __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ __ret; \ }) #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ int8_t __ret; \ int16_t __s0 = __p0; \ __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ __ret; \ }) __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); return __ret; } __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); return __ret; } __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); return __ret; } __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); return __ret; } __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); return __ret; } __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); return __ret; } __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); return __ret; } __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); return 
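/*
 * Illustrative usage sketch for the vqtbl1 table-lookup forms above (the
 * helper name map_nibbles_to_hex is hypothetical; an AArch64 target is
 * assumed).  vqtbl1q_u8 selects one byte from a 16-entry table per index
 * lane; indices >= 16 produce 0:
 *
 *   static uint8x16_t map_nibbles_to_hex(uint8x16_t nibbles) // values 0..15
 *   {
 *       static const uint8_t hex[16] = { '0','1','2','3','4','5','6','7',
 *                                        '8','9','a','b','c','d','e','f' };
 *       return vqtbl1q_u8(vld1q_u8(hex), nibbles);
 *   }
 */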
__ret; } #else __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { poly8x16_t __ret; poly8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t 
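/*
 * Illustrative usage sketch for the two-table vqtbl2 forms (the helper name
 * lookup32 is hypothetical; an AArch64 target is assumed).  The index vector
 * addresses 32 table bytes: val[0] holds entries 0-15, val[1] entries 16-31,
 * and indices >= 32 produce 0:
 *
 *   static uint8x16_t lookup32(const uint8_t table[32], uint8x16_t idx)
 *   {
 *       uint8x16x2_t t;
 *       t.val[0] = vld1q_u8(table);        // entries 0-15
 *       t.val[1] = vld1q_u8(table + 16);   // entries 16-31
 *       return vqtbl2q_u8(t, idx);
 *   }
 */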
vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); return __ret; } #else __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { poly8x16_t __ret; poly8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = 
__builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); return __ret; } #else __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); return __ret; } #else __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { poly8x16_t __ret; poly8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); return __ret; } #else __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { int8x16_t __ret; __ret = 
(int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, 
uint8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); return __ret; } #else __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); return __ret; } #else __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); return __ret; } #else __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], 
(int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x2_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); return __ret; } #else __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 
8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); return __ret; } #else __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); return __ret; } #else __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { 
uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x3_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4); return __ret; } #else __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 
(int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); return __ret; } #else __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); return __ret; } #else __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); return __ret; } #else __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); return __ret; } #else __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); return __ret; } #else __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x4_t __rev1; __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) 
__builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); return __ret; } #else __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2)); return __ret; } #else __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); return __ret; } #else __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); return __ret; } #else __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); return __ret; } #else __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 
0); int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); return __ret; } #else __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vrbit_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); return __ret; } #else __ai poly8x8_t vrbit_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); return __ret; } #else __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); return __ret; } #else __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vrbitq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32); return __ret; } #else __ai int8x16_t vrbitq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vrbit_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); return __ret; } #else __ai uint8x8_t vrbit_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 
3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vrbit_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); return __ret; } #else __ai int8x8_t vrbit_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrecpeq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrecpeq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrecpe_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); return __ret; } __ai float64_t vrecped_f64(float64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); return __ret; } __ai float32_t vrecpes_f32(float32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); return __ret; } __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); return __ret; } __ai float64_t vrecpxd_f64(float64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); return __ret; } __ai float32_t vrecpxs_f32(float32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); return __ret; } __ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); return __ret; } __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); return __ret; } #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ __ret; \ }) #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vrshrn_high_n_u32(__p0_807, __p1_807, __p2_807) __extension__ ({ \ uint16x8_t __ret_807; \ uint16x4_t __s0_807 = __p0_807; \ uint32x4_t 
__s1_807 = __p1_807; \ __ret_807 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_807), (uint16x4_t)(vrshrn_n_u32(__s1_807, __p2_807)))); \ __ret_807; \ }) #else #define vrshrn_high_n_u32(__p0_808, __p1_808, __p2_808) __extension__ ({ \ uint16x8_t __ret_808; \ uint16x4_t __s0_808 = __p0_808; \ uint32x4_t __s1_808 = __p1_808; \ uint16x4_t __rev0_808; __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 3, 2, 1, 0); \ uint32x4_t __rev1_808; __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 3, 2, 1, 0); \ __ret_808 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_808), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_808, __p2_808)))); \ __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_808; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_high_n_u64(__p0_809, __p1_809, __p2_809) __extension__ ({ \ uint32x4_t __ret_809; \ uint32x2_t __s0_809 = __p0_809; \ uint64x2_t __s1_809 = __p1_809; \ __ret_809 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_809), (uint32x2_t)(vrshrn_n_u64(__s1_809, __p2_809)))); \ __ret_809; \ }) #else #define vrshrn_high_n_u64(__p0_810, __p1_810, __p2_810) __extension__ ({ \ uint32x4_t __ret_810; \ uint32x2_t __s0_810 = __p0_810; \ uint64x2_t __s1_810 = __p1_810; \ uint32x2_t __rev0_810; __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 1, 0); \ uint64x2_t __rev1_810; __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 1, 0); \ __ret_810 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_810), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_810, __p2_810)))); \ __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 3, 2, 1, 0); \ __ret_810; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_high_n_u16(__p0_811, __p1_811, __p2_811) __extension__ ({ \ uint8x16_t __ret_811; \ uint8x8_t __s0_811 = __p0_811; \ uint16x8_t __s1_811 = __p1_811; \ __ret_811 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_811), (uint8x8_t)(vrshrn_n_u16(__s1_811, __p2_811)))); \ __ret_811; \ }) #else #define vrshrn_high_n_u16(__p0_812, __p1_812, __p2_812) __extension__ ({ \ uint8x16_t __ret_812; \ uint8x8_t __s0_812 = __p0_812; \ uint16x8_t __s1_812 = __p1_812; \ uint8x8_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_812 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_812), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_812, __p2_812)))); \ __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_812; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_high_n_s32(__p0_813, __p1_813, __p2_813) __extension__ ({ \ int16x8_t __ret_813; \ int16x4_t __s0_813 = __p0_813; \ int32x4_t __s1_813 = __p1_813; \ __ret_813 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_813), (int16x4_t)(vrshrn_n_s32(__s1_813, __p2_813)))); \ __ret_813; \ }) #else #define vrshrn_high_n_s32(__p0_814, __p1_814, __p2_814) __extension__ ({ \ int16x8_t __ret_814; \ int16x4_t __s0_814 = __p0_814; \ int32x4_t __s1_814 = __p1_814; \ int16x4_t __rev0_814; __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 3, 2, 1, 0); \ int32x4_t __rev1_814; __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 3, 2, 1, 0); \ __ret_814 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_814), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_814, __p2_814)))); \ __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_814; \ }) #endif 
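/*
 * Editor's note (illustrative sketch, not part of the generated intrinsics):
 * the vrshrn_high_n_* macros in this block narrow their second operand with a
 * rounding right shift by the immediate and place the result in the high half
 * of the destination, keeping the first operand as the low half. A typical use
 * is packing two widened 32-bit accumulators back into one 16-bit vector, as
 * sketched below. The helper name and the shift amount of 8 are hypothetical
 * choices for illustration only.
 */
#if 0 /* usage sketch only; not emitted by the intrinsics generator */
static uint16x8_t pack_acc_u32(uint32x4_t acc_lo, uint32x4_t acc_hi) {
  uint16x4_t lo = vrshrn_n_u32(acc_lo, 8);  /* round-shift and narrow the low accumulator */
  return vrshrn_high_n_u32(lo, acc_hi, 8);  /* append the narrowed high accumulator       */
}
#endif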
#ifdef __LITTLE_ENDIAN__ #define vrshrn_high_n_s64(__p0_815, __p1_815, __p2_815) __extension__ ({ \ int32x4_t __ret_815; \ int32x2_t __s0_815 = __p0_815; \ int64x2_t __s1_815 = __p1_815; \ __ret_815 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_815), (int32x2_t)(vrshrn_n_s64(__s1_815, __p2_815)))); \ __ret_815; \ }) #else #define vrshrn_high_n_s64(__p0_816, __p1_816, __p2_816) __extension__ ({ \ int32x4_t __ret_816; \ int32x2_t __s0_816 = __p0_816; \ int64x2_t __s1_816 = __p1_816; \ int32x2_t __rev0_816; __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \ int64x2_t __rev1_816; __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 1, 0); \ __ret_816 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_816), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_816, __p2_816)))); \ __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 3, 2, 1, 0); \ __ret_816; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vrshrn_high_n_s16(__p0_817, __p1_817, __p2_817) __extension__ ({ \ int8x16_t __ret_817; \ int8x8_t __s0_817 = __p0_817; \ int16x8_t __s1_817 = __p1_817; \ __ret_817 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_817), (int8x8_t)(vrshrn_n_s16(__s1_817, __p2_817)))); \ __ret_817; \ }) #else #define vrshrn_high_n_s16(__p0_818, __p1_818, __p2_818) __extension__ ({ \ int8x16_t __ret_818; \ int8x8_t __s0_818 = __p0_818; \ int16x8_t __s1_818 = __p1_818; \ int8x8_t __rev0_818; __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_818; __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_818 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_818), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_818, __p2_818)))); \ __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_818; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrsqrte_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); return __ret; } __ai float64_t vrsqrted_f64(float64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); return __ret; } __ai float32_t vrsqrtes_f32(float32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } 
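/*
 * Editor's note (illustrative sketch, not part of the generated intrinsics):
 * vrsqrte* returns a low-precision reciprocal square-root estimate, and
 * vrsqrts*(a, b) returns the Newton-Raphson correction factor (3 - a*b)/2.
 * Multiplying the estimate by that factor refines it; each step roughly
 * doubles the number of accurate bits. The helper name and the choice of two
 * refinement steps below are hypothetical, shown only to illustrate how the
 * estimate and step intrinsics defined above are meant to be combined.
 */
#if 0 /* usage sketch only; not emitted by the intrinsics generator */
static float64x2_t approx_rsqrt_f64(float64x2_t d) {
  float64x2_t e = vrsqrteq_f64(d);                      /* initial coarse estimate of 1/sqrt(d) */
  e = vmulq_f64(e, vrsqrtsq_f64(vmulq_f64(d, e), e));   /* first Newton-Raphson refinement      */
  e = vmulq_f64(e, vrsqrtsq_f64(vmulq_f64(d, e), e));   /* second refinement                    */
  return e;
}
#endif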
__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); return __ret; } __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); return __ret; } #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64_t __ret; \ uint64_t __s0 = __p0; \ uint64_t __s1 = __p1; \ __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ __ret; \ }) #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int64_t __s1 = __p1; \ __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); return __ret; } #else __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); return __ret; } #else __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); return __ret; } #else __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); return __ret; } #else __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); 
return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); return __ret; } #else __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); return __ret; } #else __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1_t __ret; \ poly64_t __s0 = __p0; \ poly64x1_t __s1 = __p1; \ __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2_t __ret; \ float64_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ __ret; \ }) #else #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2_t __ret; \ float64_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2_t __ret; \ float64_t __s0 = __p0; \ float64x2_t __s1 = __p1; \ __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ __ret; \ }) #endif #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1_t __ret; \ float64_t __s0 = __p0; \ float64x1_t __s1 = __p1; \ __ret = (float64x1_t) 
__builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \ __ret; \ }) __ai uint64_t vshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); return __ret; } __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); return __ret; } #define vshld_n_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ __ret; \ }) #define vshld_n_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vshll_high_n_u8(__p0_819, __p1_819) __extension__ ({ \ uint16x8_t __ret_819; \ uint8x16_t __s0_819 = __p0_819; \ __ret_819 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_819), __p1_819)); \ __ret_819; \ }) #else #define vshll_high_n_u8(__p0_820, __p1_820) __extension__ ({ \ uint16x8_t __ret_820; \ uint8x16_t __s0_820 = __p0_820; \ uint8x16_t __rev0_820; __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_820 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_820), __p1_820)); \ __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_820; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_high_n_u32(__p0_821, __p1_821) __extension__ ({ \ uint64x2_t __ret_821; \ uint32x4_t __s0_821 = __p0_821; \ __ret_821 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_821), __p1_821)); \ __ret_821; \ }) #else #define vshll_high_n_u32(__p0_822, __p1_822) __extension__ ({ \ uint64x2_t __ret_822; \ uint32x4_t __s0_822 = __p0_822; \ uint32x4_t __rev0_822; __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \ __ret_822 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_822), __p1_822)); \ __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 1, 0); \ __ret_822; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_high_n_u16(__p0_823, __p1_823) __extension__ ({ \ uint32x4_t __ret_823; \ uint16x8_t __s0_823 = __p0_823; \ __ret_823 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_823), __p1_823)); \ __ret_823; \ }) #else #define vshll_high_n_u16(__p0_824, __p1_824) __extension__ ({ \ uint32x4_t __ret_824; \ uint16x8_t __s0_824 = __p0_824; \ uint16x8_t __rev0_824; __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_824 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_824), __p1_824)); \ __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \ __ret_824; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_high_n_s8(__p0_825, __p1_825) __extension__ ({ \ int16x8_t __ret_825; \ int8x16_t __s0_825 = __p0_825; \ __ret_825 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_825), __p1_825)); \ __ret_825; \ }) #else #define vshll_high_n_s8(__p0_826, __p1_826) __extension__ ({ \ int16x8_t __ret_826; \ int8x16_t __s0_826 = __p0_826; \ int8x16_t __rev0_826; __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_826 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_826), __p1_826)); \ __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_826; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_high_n_s32(__p0_827, __p1_827) __extension__ ({ \ int64x2_t __ret_827; \ int32x4_t __s0_827 = __p0_827; \ __ret_827 
= (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_827), __p1_827)); \ __ret_827; \ }) #else #define vshll_high_n_s32(__p0_828, __p1_828) __extension__ ({ \ int64x2_t __ret_828; \ int32x4_t __s0_828 = __p0_828; \ int32x4_t __rev0_828; __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \ __ret_828 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_828), __p1_828)); \ __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 1, 0); \ __ret_828; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshll_high_n_s16(__p0_829, __p1_829) __extension__ ({ \ int32x4_t __ret_829; \ int16x8_t __s0_829 = __p0_829; \ __ret_829 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_829), __p1_829)); \ __ret_829; \ }) #else #define vshll_high_n_s16(__p0_830, __p1_830) __extension__ ({ \ int32x4_t __ret_830; \ int16x8_t __s0_830 = __p0_830; \ int16x8_t __rev0_830; __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_830 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_830), __p1_830)); \ __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 3, 2, 1, 0); \ __ret_830; \ }) #endif #define vshrd_n_u64(__p0, __p1) __extension__ ({ \ uint64_t __ret; \ uint64_t __s0 = __p0; \ __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ __ret; \ }) #define vshrd_n_s64(__p0, __p1) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vshrn_high_n_u32(__p0_831, __p1_831, __p2_831) __extension__ ({ \ uint16x8_t __ret_831; \ uint16x4_t __s0_831 = __p0_831; \ uint32x4_t __s1_831 = __p1_831; \ __ret_831 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_831), (uint16x4_t)(vshrn_n_u32(__s1_831, __p2_831)))); \ __ret_831; \ }) #else #define vshrn_high_n_u32(__p0_832, __p1_832, __p2_832) __extension__ ({ \ uint16x8_t __ret_832; \ uint16x4_t __s0_832 = __p0_832; \ uint32x4_t __s1_832 = __p1_832; \ uint16x4_t __rev0_832; __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \ uint32x4_t __rev1_832; __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 3, 2, 1, 0); \ __ret_832 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_832), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_832, __p2_832)))); \ __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_832; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_high_n_u64(__p0_833, __p1_833, __p2_833) __extension__ ({ \ uint32x4_t __ret_833; \ uint32x2_t __s0_833 = __p0_833; \ uint64x2_t __s1_833 = __p1_833; \ __ret_833 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_833), (uint32x2_t)(vshrn_n_u64(__s1_833, __p2_833)))); \ __ret_833; \ }) #else #define vshrn_high_n_u64(__p0_834, __p1_834, __p2_834) __extension__ ({ \ uint32x4_t __ret_834; \ uint32x2_t __s0_834 = __p0_834; \ uint64x2_t __s1_834 = __p1_834; \ uint32x2_t __rev0_834; __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \ uint64x2_t __rev1_834; __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 1, 0); \ __ret_834 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_834), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_834, __p2_834)))); \ __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 3, 2, 1, 0); \ __ret_834; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_high_n_u16(__p0_835, __p1_835, __p2_835) __extension__ ({ \ uint8x16_t __ret_835; \ uint8x8_t __s0_835 = __p0_835; \ uint16x8_t __s1_835 = __p1_835; \ __ret_835 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_835), 
(uint8x8_t)(vshrn_n_u16(__s1_835, __p2_835)))); \ __ret_835; \ }) #else #define vshrn_high_n_u16(__p0_836, __p1_836, __p2_836) __extension__ ({ \ uint8x16_t __ret_836; \ uint8x8_t __s0_836 = __p0_836; \ uint16x8_t __s1_836 = __p1_836; \ uint8x8_t __rev0_836; __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1_836; __rev1_836 = __builtin_shufflevector(__s1_836, __s1_836, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_836 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_836), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_836, __p2_836)))); \ __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_836; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_high_n_s32(__p0_837, __p1_837, __p2_837) __extension__ ({ \ int16x8_t __ret_837; \ int16x4_t __s0_837 = __p0_837; \ int32x4_t __s1_837 = __p1_837; \ __ret_837 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_837), (int16x4_t)(vshrn_n_s32(__s1_837, __p2_837)))); \ __ret_837; \ }) #else #define vshrn_high_n_s32(__p0_838, __p1_838, __p2_838) __extension__ ({ \ int16x8_t __ret_838; \ int16x4_t __s0_838 = __p0_838; \ int32x4_t __s1_838 = __p1_838; \ int16x4_t __rev0_838; __rev0_838 = __builtin_shufflevector(__s0_838, __s0_838, 3, 2, 1, 0); \ int32x4_t __rev1_838; __rev1_838 = __builtin_shufflevector(__s1_838, __s1_838, 3, 2, 1, 0); \ __ret_838 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_838), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_838, __p2_838)))); \ __ret_838 = __builtin_shufflevector(__ret_838, __ret_838, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_838; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_high_n_s64(__p0_839, __p1_839, __p2_839) __extension__ ({ \ int32x4_t __ret_839; \ int32x2_t __s0_839 = __p0_839; \ int64x2_t __s1_839 = __p1_839; \ __ret_839 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_839), (int32x2_t)(vshrn_n_s64(__s1_839, __p2_839)))); \ __ret_839; \ }) #else #define vshrn_high_n_s64(__p0_840, __p1_840, __p2_840) __extension__ ({ \ int32x4_t __ret_840; \ int32x2_t __s0_840 = __p0_840; \ int64x2_t __s1_840 = __p1_840; \ int32x2_t __rev0_840; __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 1, 0); \ int64x2_t __rev1_840; __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 1, 0); \ __ret_840 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_840), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_840, __p2_840)))); \ __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \ __ret_840; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vshrn_high_n_s16(__p0_841, __p1_841, __p2_841) __extension__ ({ \ int8x16_t __ret_841; \ int8x8_t __s0_841 = __p0_841; \ int16x8_t __s1_841 = __p1_841; \ __ret_841 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_841), (int8x8_t)(vshrn_n_s16(__s1_841, __p2_841)))); \ __ret_841; \ }) #else #define vshrn_high_n_s16(__p0_842, __p1_842, __p2_842) __extension__ ({ \ int8x16_t __ret_842; \ int8x8_t __s0_842 = __p0_842; \ int16x8_t __s1_842 = __p1_842; \ int8x8_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_842 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_842), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_842, __p2_842)))); \ __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_842; \ }) #endif #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64_t __ret; \ 
uint64_t __s0 = __p0; \ uint64_t __s1 = __p1; \ __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ __ret; \ }) #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int64_t __s1 = __p1; \ __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ __ret; \ }) #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1_t __ret; \ poly64x1_t __s0 = __p0; \ poly64x1_t __s1 = __p1; \ __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ __ret; \ }) #else #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif __ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); return __ret; } __ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); return __ret; } __ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); return __ret; } __ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vsqrtq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); return __ret; } #else __ai float64x2_t vsqrtq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vsqrtq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) 
__builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); return __ret; } #else __ai float32x4_t vsqrtq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai float64x1_t vsqrt_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vsqrt_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); return __ret; } #else __ai float32x2_t vsqrt_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64_t __ret; \ uint64_t __s0 = __p0; \ uint64_t __s1 = __p1; \ __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ __ret; \ }) #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int64_t __s1 = __p1; \ __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ __ret; \ }) #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64_t __ret; \ uint64_t __s0 = __p0; \ uint64_t __s1 = __p1; \ __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ __ret; \ }) #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ int64_t __s1 = __p1; \ __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ __ret; \ }) #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1_t __ret; \ poly64x1_t __s0 = __p0; \ poly64x1_t __s1 = __p1; \ __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ __ret; \ }) #else #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __ret; \ poly64x2_t __s0 = __p0; \ poly64x2_t __s1 = __p1; \ poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #define vst1_p64(__p0, __p1) __extension__ ({ \ poly64x1_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1q_p64(__p0, __p1) __extension__ ({ \ poly64x2_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \ }) #else #define vst1q_p64(__p0, __p1) __extension__ ({ \ poly64x2_t __s1 = __p1; \ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f64(__p0, __p1) __extension__ ({ \ float64x2_t __s1 = __p1; \ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \ }) #else #define vst1q_f64(__p0, __p1) __extension__ ({ \ float64x2_t __s1 = __p1; \ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ 
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \ }) #endif #define vst1_f64(__p0, __p1) __extension__ ({ \ float64x1_t __s1 = __p1; \ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ }) #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ }) #else #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2_t __s1 = __p1; \ poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2_t __s1 = __p1; \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ }) #else #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2_t __s1 = __p1; \ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ }) #endif #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1_t __s1 = __p1; \ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ }) #define vst1_p64_x2(__p0, __p1) __extension__ ({ \ poly64x1x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ poly64x2x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ }) #else #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ poly64x2x2_t __s1 = __p1; \ poly64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ float64x2x2_t __s1 = __p1; \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ }) #else #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ float64x2x2_t __s1 = __p1; \ float64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ }) #endif #define vst1_f64_x2(__p0, __p1) __extension__ ({ \ float64x1x2_t __s1 = __p1; \ __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ }) #define vst1_p64_x3(__p0, __p1) __extension__ ({ \ poly64x1x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ poly64x2x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ }) #else #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ poly64x2x3_t __s1 = __p1; \ poly64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ float64x2x3_t __s1 = __p1; \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ }) #else #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ float64x2x3_t __s1 = __p1; \ float64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ }) #endif #define vst1_f64_x3(__p0, __p1) __extension__ ({ \ float64x1x3_t __s1 = __p1; \ __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ }) #define vst1_p64_x4(__p0, __p1) __extension__ ({ \ poly64x1x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ poly64x2x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ }) #else #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ poly64x2x4_t __s1 = __p1; \ poly64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ float64x2x4_t __s1 = __p1; \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ }) #else #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ float64x2x4_t __s1 = __p1; \ float64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ }) #endif #define vst1_f64_x4(__p0, __p1) __extension__ ({ \ float64x1x4_t __s1 = __p1; \ __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ }) #define vst2_p64(__p0, __p1) __extension__ ({ \ poly64x1x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst2q_p64(__p0, __p1) __extension__ ({ \ poly64x2x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ }) #else #define vst2q_p64(__p0, __p1) __extension__ ({ \ poly64x2x2_t __s1 = __p1; \ poly64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_u64(__p0, __p1) __extension__ ({ \ uint64x2x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ }) #else #define vst2q_u64(__p0, __p1) __extension__ ({ \ uint64x2x2_t __s1 = __p1; \ uint64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_f64(__p0, __p1) __extension__ ({ \ float64x2x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ }) #else #define vst2q_f64(__p0, __p1) __extension__ ({ \ float64x2x2_t __s1 = __p1; \ float64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_s64(__p0, __p1) __extension__ ({ \ int64x2x2_t __s1 = __p1; \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ }) #else #define vst2q_s64(__p0, __p1) __extension__ ({ \ int64x2x2_t __s1 = __p1; \ int64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ }) #endif #define vst2_f64(__p0, __p1) __extension__ ({ \ float64x1x2_t __s1 = __p1; \ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ }) #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ }) #else #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x2_t __s1 = __p1; \ poly8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ }) #else #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x2_t __s1 = __p1; \ poly64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ }) #else #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x2_t __s1 = __p1; \ uint8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ }) #else #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x2_t __s1 = __p1; \ uint64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ }) #else #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x2_t __s1 = __p1; \ int8x16x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ }) #else #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x2_t __s1 = __p1; \ float64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x2_t __s1 = __p1; \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ }) #else #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x2_t __s1 = __p1; \ int64x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ }) #endif #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ }) #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ }) #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1x2_t __s1 = __p1; \ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ }) #define vst3_p64(__p0, __p1) 
__extension__ ({ \ poly64x1x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst3q_p64(__p0, __p1) __extension__ ({ \ poly64x2x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ }) #else #define vst3q_p64(__p0, __p1) __extension__ ({ \ poly64x2x3_t __s1 = __p1; \ poly64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_u64(__p0, __p1) __extension__ ({ \ uint64x2x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ }) #else #define vst3q_u64(__p0, __p1) __extension__ ({ \ uint64x2x3_t __s1 = __p1; \ uint64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_f64(__p0, __p1) __extension__ ({ \ float64x2x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ }) #else #define vst3q_f64(__p0, __p1) __extension__ ({ \ float64x2x3_t __s1 = __p1; \ float64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_s64(__p0, __p1) __extension__ ({ \ int64x2x3_t __s1 = __p1; \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ }) #else #define vst3q_s64(__p0, __p1) __extension__ ({ \ int64x2x3_t __s1 = __p1; \ int64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ }) #endif #define vst3_f64(__p0, __p1) __extension__ ({ \ float64x1x3_t __s1 = __p1; \ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ }) #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ }) #else #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x3_t __s1 = __p1; \ poly8x16x3_t 
__rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ }) #else #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x3_t __s1 = __p1; \ poly64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ }) #else #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x3_t __s1 = __p1; \ uint8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ }) #else #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x3_t __s1 = __p1; \ uint64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ }) #else #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x3_t __s1 = __p1; \ int8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ }) #else #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x3_t __s1 = __p1; \ float64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x3_t __s1 = __p1; \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ }) #else #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x3_t __s1 = __p1; \ int64x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ }) #endif #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ }) #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ }) #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1x3_t __s1 = __p1; \ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ }) #define vst4_p64(__p0, __p1) __extension__ ({ \ poly64x1x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst4q_p64(__p0, __p1) __extension__ ({ \ poly64x2x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ }) #else #define vst4q_p64(__p0, __p1) __extension__ ({ \ poly64x2x4_t __s1 = __p1; \ poly64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_u64(__p0, __p1) __extension__ ({ \ uint64x2x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ }) #else #define vst4q_u64(__p0, __p1) __extension__ ({ \ uint64x2x4_t __s1 = __p1; \ uint64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ 
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_f64(__p0, __p1) __extension__ ({ \ float64x2x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ }) #else #define vst4q_f64(__p0, __p1) __extension__ ({ \ float64x2x4_t __s1 = __p1; \ float64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_s64(__p0, __p1) __extension__ ({ \ int64x2x4_t __s1 = __p1; \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ }) #else #define vst4q_s64(__p0, __p1) __extension__ ({ \ int64x2x4_t __s1 = __p1; \ int64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ }) #endif #define vst4_f64(__p0, __p1) __extension__ ({ \ float64x1x4_t __s1 = __p1; \ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ }) #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ }) #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ }) #else #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16x4_t __s1 = __p1; \ poly8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x4_t __s1 = __p1; \ 
__builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ }) #else #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x2x4_t __s1 = __p1; \ poly64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ }) #else #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16x4_t __s1 = __p1; \ uint8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ }) #else #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2x4_t __s1 = __p1; \ uint64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ }) #else #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16x4_t __s1 = __p1; \ int8x16x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 
(int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ }) #else #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x2x4_t __s1 = __p1; \ float64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x4_t __s1 = __p1; \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ }) #else #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2x4_t __s1 = __p1; \ int64x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ }) #endif #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ }) #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ float64x1x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ }) #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1x4_t __s1 = __p1; \ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ }) #define vstrq_p128(__p0, __p1) __extension__ ({ \ poly128_t __s1 = __p1; \ __builtin_neon_vstrq_p128(__p0, __s1); \ }) __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); return __ret; } __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __p0 - __p1; return __ret; } #else __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = __p0 - __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { 
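  /* vsubhn_high_*: compute a narrowing "subtract high half" (vsubhn) of the
   * two wide operands and pack it into the upper half of a 128-bit result,
   * keeping the first argument as the lower half (via vcombine).  The sketch
   * below is illustrative only; the helper name is hypothetical and not part
   * of this header:
   *
   *   uint16x8_t pack_narrowed_diffs(uint16x4_t lo, uint32x4_t a, uint32x4_t b) {
   *     return vsubhn_high_u32(lo, a, b);  // lanes: [ lo | high16(a - b) ]
   *   }
   */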
uint16x8_t __ret; __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); return __ret; } #else __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); return __ret; } #else __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); return __ret; } #else __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); return __ret; } #else __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); return __ret; } #else __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; __ret = vcombine_s8(__p0, 
vsubhn_s16(__p1, __p2)); return __ret; } #else __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); return __ret; } #else __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); return __ret; } #else __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1); return __ret; } #else __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); return __ret; } #else __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); return __ret; } #else __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = 
__noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); return __ret; } #else __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = __p0 - vmovl_high_u8(__p1); return __ret; } #else __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_high_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = __p0 - vmovl_high_u32(__p1); return __ret; } #else __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_high_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = __p0 - vmovl_high_u16(__p1); return __ret; } #else __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_high_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = __p0 - vmovl_high_s8(__p1); return __ret; } #else __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_high_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = __p0 - vmovl_high_s32(__p1); return __ret; } #else __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_high_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ 
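/* vsubl_high_* / vsubw_high_*: widening subtracts over the upper halves of
 * 128-bit inputs.  vsubl_high widens both upper halves with vmovl_high before
 * subtracting; vsubw_high widens only the second operand.  Commented sketch
 * (the helper name is hypothetical, not part of this header):
 *
 *   int32x4_t widen_sub_top(int16x8_t a, int16x8_t b) {
 *     return vsubl_high_s16(a, b);  // per lane: (int32)a[i+4] - (int32)b[i+4]
 *   }
 */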
__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = __p0 - vmovl_high_s16(__p1); return __ret; } #else __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmovl_high_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vsudotq_laneq_s32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ int32x4_t __ret_843; \ int32x4_t __s0_843 = __p0_843; \ int8x16_t __s1_843 = __p1_843; \ uint8x16_t __s2_843 = __p2_843; \ uint8x16_t __reint_843 = __s2_843; \ __ret_843 = vusdotq_s32(__s0_843, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_843, __p3_843)), __s1_843); \ __ret_843; \ }) #else #define vsudotq_laneq_s32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ int32x4_t __ret_844; \ int32x4_t __s0_844 = __p0_844; \ int8x16_t __s1_844 = __p1_844; \ uint8x16_t __s2_844 = __p2_844; \ int32x4_t __rev0_844; __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \ int8x16_t __rev1_844; __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __reint_844 = __rev2_844; \ __ret_844 = __noswap_vusdotq_s32(__rev0_844, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_844, __p3_844)), __rev1_844); \ __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \ __ret_844; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsudot_laneq_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ int32x2_t __ret_845; \ int32x2_t __s0_845 = __p0_845; \ int8x8_t __s1_845 = __p1_845; \ uint8x16_t __s2_845 = __p2_845; \ uint8x16_t __reint_845 = __s2_845; \ __ret_845 = vusdot_s32(__s0_845, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_845, __p3_845)), __s1_845); \ __ret_845; \ }) #else #define vsudot_laneq_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ int32x2_t __ret_846; \ int32x2_t __s0_846 = __p0_846; \ int8x8_t __s1_846 = __p1_846; \ uint8x16_t __s2_846 = __p2_846; \ int32x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ int8x8_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __reint_846 = __rev2_846; \ __ret_846 = __noswap_vusdot_s32(__rev0_846, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_846, __p3_846)), __rev1_846); \ __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ __ret_846; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 
6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else __ai uint32x4_t vtrn1q_u32(uint32x4_t 
__p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return 
__ret; } #else __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; 
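  /* trn2 index pattern: 1, 9, 3, 11, ... interleaves the odd-numbered lanes
   * of __p0 with the odd-numbered lanes of __p1 (indices >= 8 refer to the
   * second shuffle operand), matching the AArch64 TRN2 operation. */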
} #else __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 
11, 5, 13, 7, 15); return __ret; } #else __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { 
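  /* Big-endian variant: the input lanes are reversed on entry (__rev0/__rev1),
   * the shuffle is done in little-endian lane numbering, and the result is
   * reversed back before returning.  This reverse/operate/reverse pattern is
   * what all of the #else branches in this header implement. */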
int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { uint64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); return __ret; } __ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1); return __ret; } __ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) { int8_t __ret; __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); 
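  /* vuqadd*: signed saturating addition of an unsigned operand to a signed
   * value (these *_s* forms correspond to the AArch64 SUQADD family).
   * Illustrative use with hypothetical values:
   *
   *   int8_t acc = 100;
   *   acc = vuqaddb_s8(acc, (uint8_t)200);  // 300 saturates to INT8_MAX (127)
   */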
return __ret; } __ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); return __ret; } __ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); return __ret; } __ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else __ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else __ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else __ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else __ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else __ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else __ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif __ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else __ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vusdotq_laneq_s32(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \ int32x4_t __ret_847; \ int32x4_t __s0_847 = __p0_847; \ uint8x16_t __s1_847 = __p1_847; \ int8x16_t __s2_847 = __p2_847; \ int8x16_t __reint_847 = __s2_847; \ __ret_847 = vusdotq_s32(__s0_847, __s1_847, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_847, __p3_847))); \ __ret_847; \ }) #else #define vusdotq_laneq_s32(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \ int32x4_t __ret_848; \ int32x4_t __s0_848 = __p0_848; \ uint8x16_t __s1_848 = __p1_848; \ int8x16_t __s2_848 = __p2_848; \ int32x4_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \ uint8x16_t __rev1_848; __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev2_848; __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __reint_848 = __rev2_848; \ __ret_848 = __noswap_vusdotq_s32(__rev0_848, __rev1_848, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_848, __p3_848))); \ __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \ __ret_848; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vusdot_laneq_s32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \ int32x2_t __ret_849; \ int32x2_t __s0_849 = __p0_849; \ uint8x8_t __s1_849 = __p1_849; \ int8x16_t __s2_849 = __p2_849; \ int8x16_t __reint_849 = __s2_849; \ __ret_849 = vusdot_s32(__s0_849, __s1_849, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_849, __p3_849))); \ __ret_849; \ }) #else #define vusdot_laneq_s32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \ int32x2_t __ret_850; \ int32x2_t __s0_850 = __p0_850; \ uint8x8_t __s1_850 = __p1_850; \ int8x16_t __s2_850 = __p2_850; \ int32x2_t __rev0_850; __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \ uint8x8_t __rev1_850; __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t 
__rev2_850; __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __reint_850 = __rev2_850; \ __ret_850 = __noswap_vusdot_s32(__rev0_850, __rev1_850, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_850, __p3_850))); \ __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \ __ret_850; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); return __ret; } #else __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t 
vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); return __ret; } #else __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); return __ret; } #else __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = 
__builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai 
poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); return __ret; } #else __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); return __ret; } #else __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); return __ret; } #else __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t 
__p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); return __ret; } #else __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); return __ret; } #else __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; 
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); return __ret; } #else __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t 
__p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); return __ret; } #else __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); return __ret; } #else __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); return __ret; } #else __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vzip2_u32(uint32x2_t 
__p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = __p0 + vabdq_u8(__p1, __p2); return __ret; } #else __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = __p0 + vabdq_u32(__p1, __p2); return __ret; } #else __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = __p0 + vabdq_u16(__p1, __p2); return __ret; } #else __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = __p0 + vabdq_s8(__p1, __p2); return __ret; } #else __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = __p0 + vabdq_s32(__p1, __p2); return __ret; } #else __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = __p0 + vabdq_s16(__p1, __p2); return __ret; } #else __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; 
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = __p0 + vabd_u8(__p1, __p2); return __ret; } #else __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = __p0 + vabd_u32(__p1, __p2); return __ret; } #else __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; __ret = __p0 + vabd_u16(__p1, __p2); return __ret; } #else __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = __p0 + vabd_s8(__p1, __p2); return __ret; } #else __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = __p0 + vabd_s32(__p1, __p2); return __ret; } #else __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2); __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = __p0 + vabd_s16(__p1, __p2); return __ret; } #else __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1)))); return __ret; } #else __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1)))); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1)))); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1)))); return __ret; } #else __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1)))); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1)))); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1)))); return __ret; } #else __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1)))); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1)))); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1)))); return __ret; } #else __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = 
(int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1)))); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1)))); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1)))); return __ret; } #else __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1)))); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1)))); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1)))); return __ret; } #else __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1)))); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1)))); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = vmovl_u8(__p0) + vmovl_u8(__p1); return __ret; } #else __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = vmovl_u32(__p0) + vmovl_u32(__p1); return __ret; } #else __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = vmovl_u16(__p0) + vmovl_u16(__p1); return __ret; } #else __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif 
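/* Editor's note (illustrative sketch, not part of the generated header):
 * the definitions in this region implement the AArch64 permute intrinsics
 * (vuzp1/vuzp2 de-interleave the even/odd elements of the two operands,
 * vzip1/vzip2 interleave their low/high halves) and the widening
 * arithmetic helpers (vaba_* accumulate an absolute difference, vabdl_*
 * widen an absolute difference, vaddl_* and vaddw_* perform widening and
 * wide adds).  Each intrinsic has two bodies: the __LITTLE_ENDIAN__ form
 * shuffles the operands directly, while the big-endian form first reverses
 * the lanes so that the __builtin_shufflevector indices match the
 * architectural lane numbering, then reverses the result back.
 *
 * A minimal usage sketch, assuming an AArch64 target and hypothetical
 * buffers `samples`, `x`, `y`, `z` (names not taken from this header):
 *
 *   // De-interleave 16-bit stereo samples into left/right channels.
 *   int16x8_t ab = vld1q_s16(samples);        // L0 R0 L1 R1 L2 R2 L3 R3
 *   int16x8_t cd = vld1q_s16(samples + 8);    // L4 R4 L5 R5 L6 R6 L7 R7
 *   int16x8_t left  = vuzp1q_s16(ab, cd);     // L0 .. L7 (even elements)
 *   int16x8_t right = vuzp2q_s16(ab, cd);     // R0 .. R7 (odd elements)
 *
 *   // Widen before adding so 8-bit lanes cannot overflow.
 *   uint16x8_t sum  = vaddl_u8(x, y);         // u8 + u8 -> u16 per lane
 *   sum             = vaddw_u8(sum, z);       // add one more u8 vector
 *   uint16x8_t diff = vabdl_u8(x, y);         // |x - y| widened to u16
 */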
#ifdef __LITTLE_ENDIAN__ __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = vmovl_s8(__p0) + vmovl_s8(__p1); return __ret; } #else __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = vmovl_s32(__p0) + vmovl_s32(__p1); return __ret; } #else __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = vmovl_s16(__p0) + vmovl_s16(__p1); return __ret; } #else __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = __p0 + vmovl_u8(__p1); return __ret; } #else __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = __p0 + vmovl_u32(__p1); return __ret; } #else __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __noswap_vmovl_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = __p0 + vmovl_u16(__p1); return __ret; } #else __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = __p0 + vmovl_s8(__p1); return __ret; } #else __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = __p0 + vmovl_s32(__p1); return __ret; } #else __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __noswap_vmovl_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = __p0 + vmovl_s16(__p1); return __ret; } #else __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vget_lane_f16(__p0_851, __p1_851) __extension__ ({ \ float16_t __ret_851; \ float16x4_t __s0_851 = __p0_851; \ float16x4_t __reint_851 = __s0_851; \ int16_t __reint1_851 = vget_lane_s16(*(int16x4_t *) &__reint_851, __p1_851); \ __ret_851 = *(float16_t *) &__reint1_851; \ __ret_851; \ }) #else #define vget_lane_f16(__p0_852, __p1_852) __extension__ ({ \ float16_t __ret_852; \ float16x4_t __s0_852 = __p0_852; \ float16x4_t __rev0_852; __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \ float16x4_t __reint_852 = __rev0_852; \ int16_t __reint1_852 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_852, __p1_852); \ __ret_852 = *(float16_t *) &__reint1_852; \ __ret_852; \ }) #define __noswap_vget_lane_f16(__p0_853, __p1_853) __extension__ ({ \ float16_t __ret_853; \ float16x4_t __s0_853 = __p0_853; \ float16x4_t __reint_853 = __s0_853; \ int16_t __reint1_853 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_853, __p1_853); \ __ret_853 = *(float16_t *) &__reint1_853; \ __ret_853; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vgetq_lane_f16(__p0_854, __p1_854) __extension__ ({ \ float16_t __ret_854; \ float16x8_t __s0_854 = __p0_854; \ float16x8_t __reint_854 = __s0_854; \ int16_t __reint1_854 = vgetq_lane_s16(*(int16x8_t *) &__reint_854, __p1_854); \ __ret_854 = *(float16_t *) &__reint1_854; \ __ret_854; \ }) #else #define vgetq_lane_f16(__p0_855, __p1_855) __extension__ ({ \ float16_t __ret_855; \ float16x8_t __s0_855 = __p0_855; \ float16x8_t __rev0_855; __rev0_855 = __builtin_shufflevector(__s0_855, __s0_855, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __reint_855 = __rev0_855; \ int16_t __reint1_855 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_855, __p1_855); \ __ret_855 = *(float16_t *) &__reint1_855; \ __ret_855; \ }) #define __noswap_vgetq_lane_f16(__p0_856, __p1_856) __extension__ ({ \ float16_t __ret_856; \ float16x8_t __s0_856 = __p0_856; \ float16x8_t __reint_856 = __s0_856; \ int16_t __reint1_856 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_856, __p1_856); \ __ret_856 = *(float16_t *) &__reint1_856; \ __ret_856; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + vmull_u8(__p1, __p2); return __ret; } #else __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t 
__p1, uint8x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + __noswap_vmull_u8(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + vmull_u32(__p1, __p2); return __ret; } #else __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + __noswap_vmull_u32(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + vmull_u16(__p1, __p2); return __ret; } #else __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + __noswap_vmull_u16(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + vmull_s8(__p1, __p2); return __ret; } #else __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + __noswap_vmull_s8(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + vmull_s32(__p1, __p2); return __ret; } #else __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + 
__noswap_vmull_s32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + __noswap_vmull_s32(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + vmull_s16(__p1, __p2); return __ret; } #else __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + __noswap_vmull_s16(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_lane_u32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ uint64x2_t __ret_857; \ uint64x2_t __s0_857 = __p0_857; \ uint32x2_t __s1_857 = __p1_857; \ uint32x2_t __s2_857 = __p2_857; \ __ret_857 = __s0_857 + vmull_u32(__s1_857, splat_lane_u32(__s2_857, __p3_857)); \ __ret_857; \ }) #else #define vmlal_lane_u32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ uint64x2_t __ret_858; \ uint64x2_t __s0_858 = __p0_858; \ uint32x2_t __s1_858 = __p1_858; \ uint32x2_t __s2_858 = __p2_858; \ uint64x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ uint32x2_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \ uint32x2_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \ __ret_858 = __rev0_858 + __noswap_vmull_u32(__rev1_858, __noswap_splat_lane_u32(__rev2_858, __p3_858)); \ __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \ __ret_858; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_lane_u16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \ uint32x4_t __ret_859; \ uint32x4_t __s0_859 = __p0_859; \ uint16x4_t __s1_859 = __p1_859; \ uint16x4_t __s2_859 = __p2_859; \ __ret_859 = __s0_859 + vmull_u16(__s1_859, splat_lane_u16(__s2_859, __p3_859)); \ __ret_859; \ }) #else #define vmlal_lane_u16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \ uint32x4_t __ret_860; \ uint32x4_t __s0_860 = __p0_860; \ uint16x4_t __s1_860 = __p1_860; \ uint16x4_t __s2_860 = __p2_860; \ uint32x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ uint16x4_t __rev1_860; __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \ uint16x4_t __rev2_860; __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \ __ret_860 = __rev0_860 + __noswap_vmull_u16(__rev1_860, __noswap_splat_lane_u16(__rev2_860, __p3_860)); \ __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \ __ret_860; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_lane_s32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \ int64x2_t __ret_861; \ int64x2_t __s0_861 = __p0_861; \ int32x2_t __s1_861 = __p1_861; \ int32x2_t __s2_861 = __p2_861; \ __ret_861 = __s0_861 + vmull_s32(__s1_861, splat_lane_s32(__s2_861, __p3_861)); \ __ret_861; \ }) #else #define vmlal_lane_s32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \ int64x2_t __ret_862; \ 
int64x2_t __s0_862 = __p0_862; \ int32x2_t __s1_862 = __p1_862; \ int32x2_t __s2_862 = __p2_862; \ int64x2_t __rev0_862; __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \ int32x2_t __rev1_862; __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \ int32x2_t __rev2_862; __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \ __ret_862 = __rev0_862 + __noswap_vmull_s32(__rev1_862, __noswap_splat_lane_s32(__rev2_862, __p3_862)); \ __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \ __ret_862; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlal_lane_s16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \ int32x4_t __ret_863; \ int32x4_t __s0_863 = __p0_863; \ int16x4_t __s1_863 = __p1_863; \ int16x4_t __s2_863 = __p2_863; \ __ret_863 = __s0_863 + vmull_s16(__s1_863, splat_lane_s16(__s2_863, __p3_863)); \ __ret_863; \ }) #else #define vmlal_lane_s16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \ int32x4_t __ret_864; \ int32x4_t __s0_864 = __p0_864; \ int16x4_t __s1_864 = __p1_864; \ int16x4_t __s2_864 = __p2_864; \ int32x4_t __rev0_864; __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \ int16x4_t __rev1_864; __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \ int16x4_t __rev2_864; __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \ __ret_864 = __rev0_864 + __noswap_vmull_s16(__rev1_864, __noswap_splat_lane_s16(__rev2_864, __p3_864)); \ __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \ __ret_864; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; } #else __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; } #else __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); 
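/* Big-endian path: __p0 has just been lane-reversed into __rev0; __p1 is
   reversed next, the accumulation runs through the __noswap_ helper on
   little-endian lane order, and the final shufflevector restores the
   big-endian lane order of the result. */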
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 - vmull_u8(__p1, __p2); return __ret; } #else __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 - __noswap_vmull_u8(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 - vmull_u32(__p1, __p2); return __ret; } #else __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 - __noswap_vmull_u32(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 - vmull_u16(__p1, __p2); return __ret; } #else __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t 
__p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 - __noswap_vmull_u16(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 - vmull_s8(__p1, __p2); return __ret; } #else __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 - __noswap_vmull_s8(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 - vmull_s32(__p1, __p2); return __ret; } #else __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 - __noswap_vmull_s32(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 - vmull_s16(__p1, __p2); return __ret; } #else __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 - __noswap_vmull_s16(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_lane_u32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ uint64x2_t __ret_865; \ uint64x2_t __s0_865 = __p0_865; \ uint32x2_t __s1_865 = __p1_865; \ uint32x2_t __s2_865 = __p2_865; \ __ret_865 = __s0_865 - vmull_u32(__s1_865, splat_lane_u32(__s2_865, __p3_865)); \ __ret_865; \ }) #else #define vmlsl_lane_u32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ uint64x2_t __ret_866; \ uint64x2_t __s0_866 = __p0_866; \ uint32x2_t __s1_866 = __p1_866; \ uint32x2_t __s2_866 = __p2_866; \ uint64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ uint32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ uint32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ __ret_866 = __rev0_866 - __noswap_vmull_u32(__rev1_866, __noswap_splat_lane_u32(__rev2_866, __p3_866)); \ __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \ __ret_866; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define 
vmlsl_lane_u16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ uint32x4_t __ret_867; \ uint32x4_t __s0_867 = __p0_867; \ uint16x4_t __s1_867 = __p1_867; \ uint16x4_t __s2_867 = __p2_867; \ __ret_867 = __s0_867 - vmull_u16(__s1_867, splat_lane_u16(__s2_867, __p3_867)); \ __ret_867; \ }) #else #define vmlsl_lane_u16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ uint32x4_t __ret_868; \ uint32x4_t __s0_868 = __p0_868; \ uint16x4_t __s1_868 = __p1_868; \ uint16x4_t __s2_868 = __p2_868; \ uint32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ uint16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ uint16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ __ret_868 = __rev0_868 - __noswap_vmull_u16(__rev1_868, __noswap_splat_lane_u16(__rev2_868, __p3_868)); \ __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ __ret_868; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_lane_s32(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \ int64x2_t __ret_869; \ int64x2_t __s0_869 = __p0_869; \ int32x2_t __s1_869 = __p1_869; \ int32x2_t __s2_869 = __p2_869; \ __ret_869 = __s0_869 - vmull_s32(__s1_869, splat_lane_s32(__s2_869, __p3_869)); \ __ret_869; \ }) #else #define vmlsl_lane_s32(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \ int64x2_t __ret_870; \ int64x2_t __s0_870 = __p0_870; \ int32x2_t __s1_870 = __p1_870; \ int32x2_t __s2_870 = __p2_870; \ int64x2_t __rev0_870; __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 1, 0); \ int32x2_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 1, 0); \ int32x2_t __rev2_870; __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 1, 0); \ __ret_870 = __rev0_870 - __noswap_vmull_s32(__rev1_870, __noswap_splat_lane_s32(__rev2_870, __p3_870)); \ __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 1, 0); \ __ret_870; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmlsl_lane_s16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \ int32x4_t __ret_871; \ int32x4_t __s0_871 = __p0_871; \ int16x4_t __s1_871 = __p1_871; \ int16x4_t __s2_871 = __p2_871; \ __ret_871 = __s0_871 - vmull_s16(__s1_871, splat_lane_s16(__s2_871, __p3_871)); \ __ret_871; \ }) #else #define vmlsl_lane_s16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \ int32x4_t __ret_872; \ int32x4_t __s0_872 = __p0_872; \ int16x4_t __s1_872 = __p1_872; \ int16x4_t __s2_872 = __p2_872; \ int32x4_t __rev0_872; __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 3, 2, 1, 0); \ int16x4_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \ int16x4_t __rev2_872; __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \ __ret_872 = __rev0_872 - __noswap_vmull_s16(__rev1_872, __noswap_splat_lane_s16(__rev2_872, __p3_872)); \ __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 3, 2, 1, 0); \ __ret_872; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; } #else __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; } #else __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vset_lane_f16(__p0_873, __p1_873, __p2_873) __extension__ ({ \ float16x4_t __ret_873; \ float16_t __s0_873 = __p0_873; \ float16x4_t __s1_873 = __p1_873; \ float16_t __reint_873 = __s0_873; \ float16x4_t __reint1_873 = __s1_873; \ int16x4_t __reint2_873 = vset_lane_s16(*(int16_t *) &__reint_873, *(int16x4_t *) &__reint1_873, __p2_873); \ __ret_873 = *(float16x4_t *) &__reint2_873; \ __ret_873; \ }) #else #define vset_lane_f16(__p0_874, __p1_874, __p2_874) __extension__ ({ \ float16x4_t __ret_874; \ float16_t __s0_874 = __p0_874; \ float16x4_t __s1_874 = __p1_874; \ float16x4_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 3, 2, 1, 0); \ float16_t __reint_874 = __s0_874; \ float16x4_t __reint1_874 = __rev1_874; \ int16x4_t __reint2_874 = __noswap_vset_lane_s16(*(int16_t *) &__reint_874, *(int16x4_t *) &__reint1_874, 
__p2_874); \ __ret_874 = *(float16x4_t *) &__reint2_874; \ __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \ __ret_874; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsetq_lane_f16(__p0_875, __p1_875, __p2_875) __extension__ ({ \ float16x8_t __ret_875; \ float16_t __s0_875 = __p0_875; \ float16x8_t __s1_875 = __p1_875; \ float16_t __reint_875 = __s0_875; \ float16x8_t __reint1_875 = __s1_875; \ int16x8_t __reint2_875 = vsetq_lane_s16(*(int16_t *) &__reint_875, *(int16x8_t *) &__reint1_875, __p2_875); \ __ret_875 = *(float16x8_t *) &__reint2_875; \ __ret_875; \ }) #else #define vsetq_lane_f16(__p0_876, __p1_876, __p2_876) __extension__ ({ \ float16x8_t __ret_876; \ float16_t __s0_876 = __p0_876; \ float16x8_t __s1_876 = __p1_876; \ float16x8_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \ float16_t __reint_876 = __s0_876; \ float16x8_t __reint1_876 = __rev1_876; \ int16x8_t __reint2_876 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_876, *(int16x8_t *) &__reint1_876, __p2_876); \ __ret_876 = *(float16x8_t *) &__reint2_876; \ __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_876; \ }) #endif #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ #define vbfmlalbq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ float32x4_t __ret_877; \ float32x4_t __s0_877 = __p0_877; \ bfloat16x8_t __s1_877 = __p1_877; \ bfloat16x4_t __s2_877 = __p2_877; \ __ret_877 = vbfmlalbq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \ __ret_877; \ }) #else #define vbfmlalbq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ float32x4_t __ret_878; \ float32x4_t __s0_878 = __p0_878; \ bfloat16x8_t __s1_878 = __p1_878; \ bfloat16x4_t __s2_878 = __p2_878; \ float32x4_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \ bfloat16x8_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x4_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \ __ret_878 = __noswap_vbfmlalbq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \ __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \ __ret_878; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vbfmlalbq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ float32x4_t __ret_879; \ float32x4_t __s0_879 = __p0_879; \ bfloat16x8_t __s1_879 = __p1_879; \ bfloat16x8_t __s2_879 = __p2_879; \ __ret_879 = vbfmlalbq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, 
__p3_879)}); \ __ret_879; \ }) #else #define vbfmlalbq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ float32x4_t __ret_880; \ float32x4_t __s0_880 = __p0_880; \ bfloat16x8_t __s1_880 = __p1_880; \ bfloat16x8_t __s2_880 = __p2_880; \ float32x4_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \ bfloat16x8_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x8_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_880 = __noswap_vbfmlalbq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \ __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \ __ret_880; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vbfmlaltq_lane_f32(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \ float32x4_t __ret_881; \ float32x4_t __s0_881 = __p0_881; \ bfloat16x8_t __s1_881 = __p1_881; \ bfloat16x4_t __s2_881 = __p2_881; \ __ret_881 = vbfmlaltq_f32(__s0_881, __s1_881, (bfloat16x8_t) {vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881)}); \ __ret_881; \ }) #else #define vbfmlaltq_lane_f32(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \ float32x4_t __ret_882; \ float32x4_t __s0_882 = __p0_882; \ bfloat16x8_t __s1_882 = __p1_882; \ bfloat16x4_t __s2_882 = __p2_882; \ float32x4_t __rev0_882; __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \ bfloat16x8_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x4_t __rev2_882; __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 3, 2, 1, 0); \ __ret_882 = __noswap_vbfmlaltq_f32(__rev0_882, __rev1_882, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882)}); \ __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ __ret_882; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vbfmlaltq_laneq_f32(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \ float32x4_t __ret_883; \ float32x4_t __s0_883 = __p0_883; \ bfloat16x8_t __s1_883 = __p1_883; \ bfloat16x8_t __s2_883 = __p2_883; \ __ret_883 = vbfmlaltq_f32(__s0_883, __s1_883, (bfloat16x8_t) {vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883)}); \ __ret_883; \ }) #else #define vbfmlaltq_laneq_f32(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \ float32x4_t 
__ret_884; \ float32x4_t __s0_884 = __p0_884; \ bfloat16x8_t __s1_884 = __p1_884; \ bfloat16x8_t __s2_884 = __p2_884; \ float32x4_t __rev0_884; __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 3, 2, 1, 0); \ bfloat16x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x8_t __rev2_884; __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_884 = __noswap_vbfmlaltq_f32(__rev0_884, __rev1_884, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884)}); \ __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 3, 2, 1, 0); \ __ret_884; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); return __ret; } #else __ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; __ret = vcvt_f32_bf16(vget_low_bf16(__p0)); return __ret; } #else __ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #endif #if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ #define vfmlalq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ float32x4_t __ret_885; \ float32x4_t __s0_885 = __p0_885; \ float16x8_t __s1_885 = __p1_885; \ float16x4_t __s2_885 = __p2_885; \ __ret_885 = vfmlalq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \ __ret_885; \ }) #else #define vfmlalq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ float32x4_t __ret_886; \ float32x4_t __s0_886 = __p0_886; \ float16x8_t __s1_886 = __p1_886; \ float16x4_t __s2_886 = __p2_886; \ float32x4_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \ float16x8_t __rev1_886; __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_886; __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \ __ret_886 = __noswap_vfmlalq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, 
__p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \ __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \ __ret_886; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlal_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ float32x2_t __ret_887; \ float32x2_t __s0_887 = __p0_887; \ float16x4_t __s1_887 = __p1_887; \ float16x4_t __s2_887 = __p2_887; \ __ret_887 = vfmlal_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \ __ret_887; \ }) #else #define vfmlal_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ float32x2_t __ret_888; \ float32x2_t __s0_888 = __p0_888; \ float16x4_t __s1_888 = __p1_888; \ float16x4_t __s2_888 = __p2_888; \ float32x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ float16x4_t __rev1_888; __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \ float16x4_t __rev2_888; __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \ __ret_888 = __noswap_vfmlal_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \ __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ __ret_888; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlalq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ float32x4_t __ret_889; \ float32x4_t __s0_889 = __p0_889; \ float16x8_t __s1_889 = __p1_889; \ float16x4_t __s2_889 = __p2_889; \ __ret_889 = vfmlalq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \ __ret_889; \ }) #else #define vfmlalq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ float32x4_t __ret_890; \ float32x4_t __s0_890 = __p0_890; \ float16x8_t __s1_890 = __p1_890; \ float16x4_t __s2_890 = __p2_890; \ float32x4_t __rev0_890; __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \ float16x8_t __rev1_890; __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_890; __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \ __ret_890 = __noswap_vfmlalq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \ __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \ __ret_890; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlal_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ float32x2_t __ret_891; \ float32x2_t __s0_891 = __p0_891; \ float16x4_t __s1_891 = __p1_891; \ float16x4_t __s2_891 = __p2_891; \ __ret_891 = vfmlal_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, 
__p3_891), vget_lane_f16(__s2_891, __p3_891)}); \ __ret_891; \ }) #else #define vfmlal_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ float32x2_t __ret_892; \ float32x2_t __s0_892 = __p0_892; \ float16x4_t __s1_892 = __p1_892; \ float16x4_t __s2_892 = __p2_892; \ float32x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ float16x4_t __rev1_892; __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \ float16x4_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \ __ret_892 = __noswap_vfmlal_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \ __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ __ret_892; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlalq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ float32x4_t __ret_893; \ float32x4_t __s0_893 = __p0_893; \ float16x8_t __s1_893 = __p1_893; \ float16x8_t __s2_893 = __p2_893; \ __ret_893 = vfmlalq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \ __ret_893; \ }) #else #define vfmlalq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ float32x4_t __ret_894; \ float32x4_t __s0_894 = __p0_894; \ float16x8_t __s1_894 = __p1_894; \ float16x8_t __s2_894 = __p2_894; \ float32x4_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \ float16x8_t __rev1_894; __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_894 = __noswap_vfmlalq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \ __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \ __ret_894; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlal_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ float32x2_t __ret_895; \ float32x2_t __s0_895 = __p0_895; \ float16x4_t __s1_895 = __p1_895; \ float16x8_t __s2_895 = __p2_895; \ __ret_895 = vfmlal_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \ __ret_895; \ }) #else #define vfmlal_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ float32x2_t __ret_896; \ float32x2_t __s0_896 = __p0_896; \ float16x4_t __s1_896 = __p1_896; \ float16x8_t __s2_896 = __p2_896; \ float32x2_t __rev0_896; __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \ float16x4_t __rev1_896; __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \ float16x8_t __rev2_896; __rev2_896 = 
__builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_896 = __noswap_vfmlal_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \ __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \ __ret_896; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlalq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ float32x4_t __ret_897; \ float32x4_t __s0_897 = __p0_897; \ float16x8_t __s1_897 = __p1_897; \ float16x8_t __s2_897 = __p2_897; \ __ret_897 = vfmlalq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \ __ret_897; \ }) #else #define vfmlalq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ float32x4_t __ret_898; \ float32x4_t __s0_898 = __p0_898; \ float16x8_t __s1_898 = __p1_898; \ float16x8_t __s2_898 = __p2_898; \ float32x4_t __rev0_898; __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \ float16x8_t __rev1_898; __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_898 = __noswap_vfmlalq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \ __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \ __ret_898; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlal_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \ float32x2_t __ret_899; \ float32x2_t __s0_899 = __p0_899; \ float16x4_t __s1_899 = __p1_899; \ float16x8_t __s2_899 = __p2_899; \ __ret_899 = vfmlal_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \ __ret_899; \ }) #else #define vfmlal_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \ float32x2_t __ret_900; \ float32x2_t __s0_900 = __p0_900; \ float16x4_t __s1_900 = __p1_900; \ float16x8_t __s2_900 = __p2_900; \ float32x2_t __rev0_900; __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \ float16x4_t __rev1_900; __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \ float16x8_t __rev2_900; __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_900 = __noswap_vfmlal_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \ __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \ __ret_900; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlslq_lane_high_f16(__p0_901, __p1_901, 
__p2_901, __p3_901) __extension__ ({ \ float32x4_t __ret_901; \ float32x4_t __s0_901 = __p0_901; \ float16x8_t __s1_901 = __p1_901; \ float16x4_t __s2_901 = __p2_901; \ __ret_901 = vfmlslq_high_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \ __ret_901; \ }) #else #define vfmlslq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \ float32x4_t __ret_902; \ float32x4_t __s0_902 = __p0_902; \ float16x8_t __s1_902 = __p1_902; \ float16x4_t __s2_902 = __p2_902; \ float32x4_t __rev0_902; __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \ float16x8_t __rev1_902; __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_902; __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \ __ret_902 = __noswap_vfmlslq_high_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \ __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \ __ret_902; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlsl_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ float32x2_t __ret_903; \ float32x2_t __s0_903 = __p0_903; \ float16x4_t __s1_903 = __p1_903; \ float16x4_t __s2_903 = __p2_903; \ __ret_903 = vfmlsl_high_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \ __ret_903; \ }) #else #define vfmlsl_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ float32x2_t __ret_904; \ float32x2_t __s0_904 = __p0_904; \ float16x4_t __s1_904 = __p1_904; \ float16x4_t __s2_904 = __p2_904; \ float32x2_t __rev0_904; __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \ float16x4_t __rev1_904; __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \ float16x4_t __rev2_904; __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \ __ret_904 = __noswap_vfmlsl_high_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \ __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \ __ret_904; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlslq_lane_low_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ float32x4_t __ret_905; \ float32x4_t __s0_905 = __p0_905; \ float16x8_t __s1_905 = __p1_905; \ float16x4_t __s2_905 = __p2_905; \ __ret_905 = vfmlslq_low_f16(__s0_905, __s1_905, (float16x8_t) {vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905)}); \ __ret_905; \ }) #else #define 
vfmlslq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ float32x4_t __ret_906; \ float32x4_t __s0_906 = __p0_906; \ float16x8_t __s1_906 = __p1_906; \ float16x4_t __s2_906 = __p2_906; \ float32x4_t __rev0_906; __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \ float16x8_t __rev1_906; __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x4_t __rev2_906; __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 3, 2, 1, 0); \ __ret_906 = __noswap_vfmlslq_low_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906)}); \ __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \ __ret_906; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlsl_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ float32x2_t __ret_907; \ float32x2_t __s0_907 = __p0_907; \ float16x4_t __s1_907 = __p1_907; \ float16x4_t __s2_907 = __p2_907; \ __ret_907 = vfmlsl_low_f16(__s0_907, __s1_907, (float16x4_t) {vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907)}); \ __ret_907; \ }) #else #define vfmlsl_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ float32x2_t __ret_908; \ float32x2_t __s0_908 = __p0_908; \ float16x4_t __s1_908 = __p1_908; \ float16x4_t __s2_908 = __p2_908; \ float32x2_t __rev0_908; __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \ float16x4_t __rev1_908; __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \ float16x4_t __rev2_908; __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 3, 2, 1, 0); \ __ret_908 = __noswap_vfmlsl_low_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908)}); \ __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \ __ret_908; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlslq_laneq_high_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ float32x4_t __ret_909; \ float32x4_t __s0_909 = __p0_909; \ float16x8_t __s1_909 = __p1_909; \ float16x8_t __s2_909 = __p2_909; \ __ret_909 = vfmlslq_high_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \ __ret_909; \ }) #else #define vfmlslq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ float32x4_t __ret_910; \ float32x4_t __s0_910 = __p0_910; \ float16x8_t __s1_910 = __p1_910; \ float16x8_t __s2_910 = __p2_910; \ float32x4_t __rev0_910; __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \ float16x8_t __rev1_910; __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2_910; __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_910 = 
__noswap_vfmlslq_high_f16(__rev0_910, __rev1_910, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910)}); \ __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \ __ret_910; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlsl_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ float32x2_t __ret_911; \ float32x2_t __s0_911 = __p0_911; \ float16x4_t __s1_911 = __p1_911; \ float16x8_t __s2_911 = __p2_911; \ __ret_911 = vfmlsl_high_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \ __ret_911; \ }) #else #define vfmlsl_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ float32x2_t __ret_912; \ float32x2_t __s0_912 = __p0_912; \ float16x4_t __s1_912 = __p1_912; \ float16x8_t __s2_912 = __p2_912; \ float32x2_t __rev0_912; __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \ float16x4_t __rev1_912; __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \ float16x8_t __rev2_912; __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_912 = __noswap_vfmlsl_high_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \ __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \ __ret_912; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlslq_laneq_low_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ float32x4_t __ret_913; \ float32x4_t __s0_913 = __p0_913; \ float16x8_t __s1_913 = __p1_913; \ float16x8_t __s2_913 = __p2_913; \ __ret_913 = vfmlslq_low_f16(__s0_913, __s1_913, (float16x8_t) {vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913)}); \ __ret_913; \ }) #else #define vfmlslq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ float32x4_t __ret_914; \ float32x4_t __s0_914 = __p0_914; \ float16x8_t __s1_914 = __p1_914; \ float16x8_t __s2_914 = __p2_914; \ float32x4_t __rev0_914; __rev0_914 = __builtin_shufflevector(__s0_914, __s0_914, 3, 2, 1, 0); \ float16x8_t __rev1_914; __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 7, 6, 5, 4, 3, 2, 1, 0); \ float16x8_t __rev2_914; __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_914 = __noswap_vfmlslq_low_f16(__rev0_914, __rev1_914, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914)}); \ __ret_914 = __builtin_shufflevector(__ret_914, __ret_914, 
3, 2, 1, 0); \ __ret_914; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vfmlsl_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ float32x2_t __ret_915; \ float32x2_t __s0_915 = __p0_915; \ float16x4_t __s1_915 = __p1_915; \ float16x8_t __s2_915 = __p2_915; \ __ret_915 = vfmlsl_low_f16(__s0_915, __s1_915, (float16x4_t) {vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915)}); \ __ret_915; \ }) #else #define vfmlsl_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ float32x2_t __ret_916; \ float32x2_t __s0_916 = __p0_916; \ float16x4_t __s1_916 = __p1_916; \ float16x8_t __s2_916 = __p2_916; \ float32x2_t __rev0_916; __rev0_916 = __builtin_shufflevector(__s0_916, __s0_916, 1, 0); \ float16x4_t __rev1_916; __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 3, 2, 1, 0); \ float16x8_t __rev2_916; __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_916 = __noswap_vfmlsl_low_f16(__rev0_916, __rev1_916, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916)}); \ __ret_916 = __builtin_shufflevector(__ret_916, __ret_916, 1, 0); \ __ret_916; \ }) #endif #endif #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ #define vmulh_lane_f16(__p0_917, __p1_917, __p2_917) __extension__ ({ \ float16_t __ret_917; \ float16_t __s0_917 = __p0_917; \ float16x4_t __s1_917 = __p1_917; \ __ret_917 = __s0_917 * vget_lane_f16(__s1_917, __p2_917); \ __ret_917; \ }) #else #define vmulh_lane_f16(__p0_918, __p1_918, __p2_918) __extension__ ({ \ float16_t __ret_918; \ float16_t __s0_918 = __p0_918; \ float16x4_t __s1_918 = __p1_918; \ float16x4_t __rev1_918; __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 3, 2, 1, 0); \ __ret_918 = __s0_918 * __noswap_vget_lane_f16(__rev1_918, __p2_918); \ __ret_918; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vmulh_laneq_f16(__p0_919, __p1_919, __p2_919) __extension__ ({ \ float16_t __ret_919; \ float16_t __s0_919 = __p0_919; \ float16x8_t __s1_919 = __p1_919; \ __ret_919 = __s0_919 * vgetq_lane_f16(__s1_919, __p2_919); \ __ret_919; \ }) #else #define vmulh_laneq_f16(__p0_920, __p1_920, __p2_920) __extension__ ({ \ float16_t __ret_920; \ float16_t __s0_920 = __p0_920; \ float16x8_t __s1_920 = __p1_920; \ float16x8_t __rev1_920; __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_920 = __s0_920 * __noswap_vgetq_lane_f16(__rev1_920, __p2_920); \ __ret_920; \ }) #endif #endif #if defined(__ARM_FEATURE_MATMUL_INT8) #ifdef __LITTLE_ENDIAN__ #define vsudotq_lane_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ int32x4_t __ret_921; \ int32x4_t __s0_921 = __p0_921; \ int8x16_t __s1_921 = __p1_921; \ uint8x8_t __s2_921 = __p2_921; \ uint8x8_t __reint_921 = __s2_921; \ __ret_921 = vusdotq_s32(__s0_921, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_921, __p3_921)), __s1_921); \ __ret_921; \ }) #else #define vsudotq_lane_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ int32x4_t __ret_922; \ int32x4_t __s0_922 = __p0_922; \ int8x16_t __s1_922 = __p1_922; \ uint8x8_t __s2_922 = __p2_922; \ int32x4_t __rev0_922; __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 3, 2, 1, 0); \ int8x16_t __rev1_922; __rev1_922 = __builtin_shufflevector(__s1_922, 
__s1_922, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev2_922; __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __reint_922 = __rev2_922; \ __ret_922 = __noswap_vusdotq_s32(__rev0_922, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_922, __p3_922)), __rev1_922); \ __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 3, 2, 1, 0); \ __ret_922; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vsudot_lane_s32(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ int32x2_t __ret_923; \ int32x2_t __s0_923 = __p0_923; \ int8x8_t __s1_923 = __p1_923; \ uint8x8_t __s2_923 = __p2_923; \ uint8x8_t __reint_923 = __s2_923; \ __ret_923 = vusdot_s32(__s0_923, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_923, __p3_923)), __s1_923); \ __ret_923; \ }) #else #define vsudot_lane_s32(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ int32x2_t __ret_924; \ int32x2_t __s0_924 = __p0_924; \ int8x8_t __s1_924 = __p1_924; \ uint8x8_t __s2_924 = __p2_924; \ int32x2_t __rev0_924; __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \ int8x8_t __rev1_924; __rev1_924 = __builtin_shufflevector(__s1_924, __s1_924, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev2_924; __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __reint_924 = __rev2_924; \ __ret_924 = __noswap_vusdot_s32(__rev0_924, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_924, __p3_924)), __rev1_924); \ __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \ __ret_924; \ }) #endif #endif #if defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1)); return __ret; } #else __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1)); return __ret; } #else __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1)); return __ret; } #else __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return 
__ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1)); return __ret; } #else __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return __ret; } #else __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1); return __ret; } #else __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1); return __ret; } #else __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1); return __ret; } #else __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1); return __ret; } #else __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1); return __ret; } #else __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1); return __ret; } #else __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = __p0 + vmovl_high_u8(__p1); return __ret; } #else __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_high_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = __p0 + vmovl_high_u32(__p1); return __ret; } #else __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_high_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = __p0 + vmovl_high_u16(__p1); return __ret; } #else __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_high_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = __p0 + vmovl_high_s8(__p1); return __ret; } #else __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_high_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = __p0 + vmovl_high_s32(__p1); return __ret; } #else __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_high_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = __p0 + vmovl_high_s16(__p1); return __ret; } #else __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vmovl_high_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ poly64x2_t __ret_925; \ poly64x2_t __s0_925 = __p0_925; \ poly64x1_t __s2_925 = __p2_925; \ __ret_925 = vsetq_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \ __ret_925; \ }) #else #define vcopyq_lane_p64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ poly64x2_t __ret_926; \ poly64x2_t __s0_926 = __p0_926; \ poly64x1_t __s2_926 = __p2_926; \ poly64x2_t __rev0_926; __rev0_926 = __builtin_shufflevector(__s0_926, __s0_926, 1, 0); \ __ret_926 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_926, __p3_926), __rev0_926, __p1_926); \ __ret_926 = __builtin_shufflevector(__ret_926, __ret_926, 1, 0); \ __ret_926; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_lane_f64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ float64x2_t __ret_927; \ float64x2_t __s0_927 = __p0_927; \ float64x1_t __s2_927 = __p2_927; \ __ret_927 = vsetq_lane_f64(vget_lane_f64(__s2_927, __p3_927), __s0_927, __p1_927); \ __ret_927; \ }) #else #define vcopyq_lane_f64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \ float64x2_t __ret_928; \ float64x2_t __s0_928 = __p0_928; \ float64x1_t __s2_928 = __p2_928; \ float64x2_t __rev0_928; __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \ __ret_928 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_928, __p3_928), __rev0_928, __p1_928); \ __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \ __ret_928; \ }) #endif #define vcopy_lane_p64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ poly64x1_t __ret_929; \ poly64x1_t __s0_929 = __p0_929; \ poly64x1_t __s2_929 = __p2_929; \ __ret_929 = 
vset_lane_p64(vget_lane_p64(__s2_929, __p3_929), __s0_929, __p1_929); \ __ret_929; \ }) #define vcopy_lane_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ float64x1_t __ret_930; \ float64x1_t __s0_930 = __p0_930; \ float64x1_t __s2_930 = __p2_930; \ __ret_930 = vset_lane_f64(vget_lane_f64(__s2_930, __p3_930), __s0_930, __p1_930); \ __ret_930; \ }) #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ poly64x2_t __ret_931; \ poly64x2_t __s0_931 = __p0_931; \ poly64x2_t __s2_931 = __p2_931; \ __ret_931 = vsetq_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \ __ret_931; \ }) #else #define vcopyq_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ poly64x2_t __ret_932; \ poly64x2_t __s0_932 = __p0_932; \ poly64x2_t __s2_932 = __p2_932; \ poly64x2_t __rev0_932; __rev0_932 = __builtin_shufflevector(__s0_932, __s0_932, 1, 0); \ poly64x2_t __rev2_932; __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \ __ret_932 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __rev0_932, __p1_932); \ __ret_932 = __builtin_shufflevector(__ret_932, __ret_932, 1, 0); \ __ret_932; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopyq_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \ float64x2_t __ret_933; \ float64x2_t __s0_933 = __p0_933; \ float64x2_t __s2_933 = __p2_933; \ __ret_933 = vsetq_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \ __ret_933; \ }) #else #define vcopyq_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \ float64x2_t __ret_934; \ float64x2_t __s0_934 = __p0_934; \ float64x2_t __s2_934 = __p2_934; \ float64x2_t __rev0_934; __rev0_934 = __builtin_shufflevector(__s0_934, __s0_934, 1, 0); \ float64x2_t __rev2_934; __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \ __ret_934 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __rev0_934, __p1_934); \ __ret_934 = __builtin_shufflevector(__ret_934, __ret_934, 1, 0); \ __ret_934; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_p64(__p0_935, __p1_935, __p2_935, __p3_935) __extension__ ({ \ poly64x1_t __ret_935; \ poly64x1_t __s0_935 = __p0_935; \ poly64x2_t __s2_935 = __p2_935; \ __ret_935 = vset_lane_p64(vgetq_lane_p64(__s2_935, __p3_935), __s0_935, __p1_935); \ __ret_935; \ }) #else #define vcopy_laneq_p64(__p0_936, __p1_936, __p2_936, __p3_936) __extension__ ({ \ poly64x1_t __ret_936; \ poly64x1_t __s0_936 = __p0_936; \ poly64x2_t __s2_936 = __p2_936; \ poly64x2_t __rev2_936; __rev2_936 = __builtin_shufflevector(__s2_936, __s2_936, 1, 0); \ __ret_936 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_936, __p3_936), __s0_936, __p1_936); \ __ret_936; \ }) #endif #ifdef __LITTLE_ENDIAN__ #define vcopy_laneq_f64(__p0_937, __p1_937, __p2_937, __p3_937) __extension__ ({ \ float64x1_t __ret_937; \ float64x1_t __s0_937 = __p0_937; \ float64x2_t __s2_937 = __p2_937; \ __ret_937 = vset_lane_f64(vgetq_lane_f64(__s2_937, __p3_937), __s0_937, __p1_937); \ __ret_937; \ }) #else #define vcopy_laneq_f64(__p0_938, __p1_938, __p2_938, __p3_938) __extension__ ({ \ float64x1_t __ret_938; \ float64x1_t __s0_938 = __p0_938; \ float64x2_t __s2_938 = __p2_938; \ float64x2_t __rev2_938; __rev2_938 = __builtin_shufflevector(__s2_938, __s2_938, 1, 0); \ __ret_938 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_938, __p3_938), __s0_938, __p1_938); \ __ret_938; \ }) #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, 
uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); return __ret; } #else __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); return __ret; } #else __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); return __ret; } #else __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); return __ret; } #else __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2); return __ret; } #else __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2); return __ret; } #else __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; __ret = 
vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); return __ret; } #else __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); return __ret; } #else __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); return __ret; } #else __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); return __ret; } #else __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __noswap_vmlsl_s32(__rev0, 
__noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2); return __ret; } #else __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2); return __ret; } #else __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #define vmulx_lane_f64(__p0_939, __p1_939, __p2_939) __extension__ ({ \ float64x1_t __ret_939; \ float64x1_t __s0_939 = __p0_939; \ float64x1_t __s1_939 = __p1_939; \ float64_t __x_939 = vget_lane_f64(__s0_939, 0); \ float64_t 
__y_939 = vget_lane_f64(__s1_939, __p2_939); \ float64_t __z_939 = vmulxd_f64(__x_939, __y_939); \ __ret_939 = vset_lane_f64(__z_939, __s0_939, __p2_939); \ __ret_939; \ }) #ifdef __LITTLE_ENDIAN__ #define vmulx_laneq_f64(__p0_940, __p1_940, __p2_940) __extension__ ({ \ float64x1_t __ret_940; \ float64x1_t __s0_940 = __p0_940; \ float64x2_t __s1_940 = __p1_940; \ float64_t __x_940 = vget_lane_f64(__s0_940, 0); \ float64_t __y_940 = vgetq_lane_f64(__s1_940, __p2_940); \ float64_t __z_940 = vmulxd_f64(__x_940, __y_940); \ __ret_940 = vset_lane_f64(__z_940, __s0_940, 0); \ __ret_940; \ }) #else #define vmulx_laneq_f64(__p0_941, __p1_941, __p2_941) __extension__ ({ \ float64x1_t __ret_941; \ float64x1_t __s0_941 = __p0_941; \ float64x2_t __s1_941 = __p1_941; \ float64x2_t __rev1_941; __rev1_941 = __builtin_shufflevector(__s1_941, __s1_941, 1, 0); \ float64_t __x_941 = vget_lane_f64(__s0_941, 0); \ float64_t __y_941 = __noswap_vgetq_lane_f64(__rev1_941, __p2_941); \ float64_t __z_941 = vmulxd_f64(__x_941, __y_941); \ __ret_941 = vset_lane_f64(__z_941, __s0_941, 0); \ __ret_941; \ }) #endif #endif #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + vabdl_u8(__p1, __p2); return __ret; } #else __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + __noswap_vabdl_u8(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + vabdl_u32(__p1, __p2); return __ret; } #else __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + __noswap_vabdl_u32(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + vabdl_u16(__p1, __p2); return __ret; } #else __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + __noswap_vabdl_u16(__p1, __p2); return __ret; } 
#endif #ifdef __LITTLE_ENDIAN__ __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + vabdl_s8(__p1, __p2); return __ret; } #else __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } __ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + __noswap_vabdl_s8(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + vabdl_s32(__p1, __p2); return __ret; } #else __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } __ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + __noswap_vabdl_s32(__p1, __p2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + vabdl_s16(__p1, __p2); return __ret; } #else __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } __ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + __noswap_vabdl_s16(__p1, __p2); return __ret; } #endif #if defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); return __ret; } #else __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); return __ret; } #else __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t 
__p2) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint32x4_t __ret;
  __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
  return __ret;
}
#else
__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int16x8_t __ret;
  __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
  return __ret;
}
#else
__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
  return __ret;
}
#else
__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
  return __ret;
}
#else
__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
#endif
#undef __ai
#endif /* if !defined(__ARM_NEON) */
#endif /* ifndef __ARM_FP */
#endif /* __ARM_NEON_H */
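
/* Illustrative usage sketch for the widening absolute-difference-accumulate
 * intrinsics defined above (vabal_u8 / vabal_high_u8).  This is not part of
 * the generated header: the block is guarded with #if 0 so it has no effect
 * on compilation, and the function and variable names are hypothetical. */
#if 0
#include <arm_neon.h>

/* Accumulate the byte-wise absolute differences |a[i] - b[i]| of two 16-byte
 * vectors into eight 16-bit lanes: the low eight bytes via vabal_u8, the high
 * eight bytes via vabal_high_u8 on AArch64 (or vget_high_u8 + vabal_u8 on
 * 32-bit targets). */
static inline uint16x8_t sad_accumulate_u8(uint16x8_t acc,
                                           uint8x16_t a, uint8x16_t b) {
  acc = vabal_u8(acc, vget_low_u8(a), vget_low_u8(b));
#ifdef __aarch64__
  acc = vabal_high_u8(acc, a, b);
#else
  acc = vabal_u8(acc, vget_high_u8(a), vget_high_u8(b));
#endif
  return acc;
}
#endif /* 0: usage sketch only */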