core/stdarch/crates/core_arch/src/aarch64/neon/generated.rs

// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
//
// ```
// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
// ```
#![allow(improper_ctypes)]

#[cfg(test)]
use stdarch_test::assert_instr;

use super::*;

#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg(not(target_arch = "arm"))]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline]
#[target_feature(enable = "crc")]
#[cfg(not(target_arch = "arm"))]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    unsafe { ___crc32d(crc, data) }
}
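// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// How the 64-bit CRC32 intrinsics are typically driven: fold eight bytes per
// call, seeding with `0xFFFF_FFFF` and inverting at the end. That seed and
// final inversion follow the common CRC-32 convention; a given protocol may
// choose differently. Assumes the test machine implements FEAT_CRC32, which
// holds for virtually all AArch64 cores.
#[cfg(all(test, target_arch = "aarch64"))]
mod crc32_usage_sketch {
    use super::*;

    #[test]
    fn fold_a_stream_of_u64_words() {
        let words = [0x0123_4567_89ab_cdefu64, 0xfedc_ba98_7654_3210];
        let mut crc = 0xFFFF_FFFFu32;
        for &w in &words {
            // Calling a `#[target_feature]` function requires `unsafe` here.
            crc = unsafe { __crc32cd(crc, w) };
        }
        let _digest = !crc; // final inversion, per the usual convention
    }
}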
49#[doc = "Signed Absolute difference and Accumulate Long"]
50#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
51#[inline]
52#[target_feature(enable = "neon")]
53#[stable(feature = "neon_intrinsics", since = "1.59.0")]
54#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
55pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
56    unsafe {
57        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
58        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
59        let f: int8x8_t = vabd_s8(d, e);
60        let f: uint8x8_t = simd_cast(f);
61        simd_add(a, simd_cast(f))
62    }
63}
64#[doc = "Signed Absolute difference and Accumulate Long"]
65#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
66#[inline]
67#[target_feature(enable = "neon")]
68#[stable(feature = "neon_intrinsics", since = "1.59.0")]
69#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
70pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
71    unsafe {
72        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
73        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
74        let f: int16x4_t = vabd_s16(d, e);
75        let f: uint16x4_t = simd_cast(f);
76        simd_add(a, simd_cast(f))
77    }
78}
79#[doc = "Signed Absolute difference and Accumulate Long"]
80#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
81#[inline]
82#[target_feature(enable = "neon")]
83#[stable(feature = "neon_intrinsics", since = "1.59.0")]
84#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
85pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
86    unsafe {
87        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
88        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
89        let f: int32x2_t = vabd_s32(d, e);
90        let f: uint32x2_t = simd_cast(f);
91        simd_add(a, simd_cast(f))
92    }
93}
94#[doc = "Unsigned Absolute difference and Accumulate Long"]
95#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
96#[inline]
97#[target_feature(enable = "neon")]
98#[stable(feature = "neon_intrinsics", since = "1.59.0")]
99#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
100pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
101    unsafe {
102        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
103        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
104        let f: uint8x8_t = vabd_u8(d, e);
105        simd_add(a, simd_cast(f))
106    }
107}
108#[doc = "Unsigned Absolute difference and Accumulate Long"]
109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
110#[inline]
111#[target_feature(enable = "neon")]
112#[stable(feature = "neon_intrinsics", since = "1.59.0")]
113#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
114pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
115    unsafe {
116        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
117        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
118        let f: uint16x4_t = vabd_u16(d, e);
119        simd_add(a, simd_cast(f))
120    }
121}
122#[doc = "Unsigned Absolute difference and Accumulate Long"]
123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
124#[inline]
125#[target_feature(enable = "neon")]
126#[stable(feature = "neon_intrinsics", since = "1.59.0")]
127#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
128pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
129    unsafe {
130        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
131        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
132        let f: uint32x2_t = vabd_u32(d, e);
133        simd_add(a, simd_cast(f))
134    }
135}
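// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// The `vabal_high_*` family, as the bodies above show, takes the absolute
// difference of the *upper* halves of `b` and `c`, widens it, and adds the
// result into the accumulator `a`. The lane values below are arbitrary.
#[cfg(all(test, target_arch = "aarch64"))]
mod vabal_high_usage_sketch {
    use super::*;

    #[test]
    fn accumulates_widened_high_half_differences() {
        unsafe {
            let acc = vdupq_n_u16(100);
            let b: [u8; 16] = [0, 0, 0, 0, 0, 0, 0, 0, 10, 20, 30, 40, 50, 60, 70, 80];
            let c: [u8; 16] = [0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8];
            let r = vabal_high_u8(acc, vld1q_u8(b.as_ptr()), vld1q_u8(c.as_ptr()));
            let mut out = [0u16; 8];
            vst1q_u16(out.as_mut_ptr(), r);
            // 100 + |b[i + 8] - c[i + 8]| for each of the eight high lanes.
            assert_eq!(out, [109, 118, 127, 136, 145, 154, 163, 172]);
        }
    }
}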
#[doc = "Floating-point absolute difference between the arguments"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Floating-point absolute difference between the arguments"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
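// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// The scalar forms above simply compute |a - b| by broadcasting into a vector,
// running FABD, and extracting lane 0:
#[cfg(all(test, target_arch = "aarch64"))]
mod vabd_scalar_usage_sketch {
    use super::*;

    #[test]
    fn scalar_absolute_difference() {
        unsafe {
            assert_eq!(vabds_f32(1.5, -2.5), 4.0);
            assert_eq!(vabdd_f64(-1.0, 2.0), 3.0);
        }
    }
}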
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
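// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// `vabdl_high_*` is the non-accumulating sibling of `vabal_high_*`: it widens
// |a.high - b.high| without adding it to anything.
#[cfg(all(test, target_arch = "aarch64"))]
mod vabdl_high_usage_sketch {
    use super::*;

    #[test]
    fn widens_high_half_differences() {
        unsafe {
            let a: [u8; 16] = [0, 0, 0, 0, 0, 0, 0, 0, 10, 20, 30, 40, 50, 60, 70, 80];
            let b: [u8; 16] = [0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8];
            let r = vabdl_high_u8(vld1q_u8(a.as_ptr()), vld1q_u8(b.as_ptr()));
            let mut out = [0u16; 8];
            vst1q_u16(out.as_mut_ptr(), r);
            assert_eq!(out, [9, 18, 27, 36, 45, 54, 63, 72]);
        }
    }
}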
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe {
        let neg: int64x1_t = simd_neg(a);
        let mask: int64x1_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe {
        let neg: int64x2_t = simd_neg(a);
        let mask: int64x2_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    unsafe { _vabsd_s64(a) }
}
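// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// The "(wrapping)" note on the integer forms matters at the type's minimum:
// like the ABS instruction, the result wraps rather than saturating.
#[cfg(all(test, target_arch = "aarch64"))]
mod vabs_wrapping_usage_sketch {
    use super::*;

    #[test]
    fn abs_wraps_at_i64_min() {
        unsafe {
            assert_eq!(vabsd_s64(-5), 5);
            // |i64::MIN| is not representable, so it wraps back to i64::MIN.
            assert_eq!(vabsd_s64(i64::MIN), i64::MIN);
        }
    }
}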
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_add(b)
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    unsafe { _vaddlvq_u8(a) as u16 }
}
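// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// The "long" reductions widen each lane before summing, so a full vector of
// 0xFF bytes sums without overflow:
#[cfg(all(test, target_arch = "aarch64"))]
mod vaddlv_usage_sketch {
    use super::*;

    #[test]
    fn widening_reduction_does_not_overflow() {
        unsafe {
            let v = vdupq_n_u8(0xFF);
            // 16 * 255 = 4080, which fits only because lanes widen to u16.
            assert_eq!(vaddlvq_u8(v), 4080);
        }
    }
}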
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vaddvq_f64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    unsafe { simd_reduce_add_unordered(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    unsafe { simd_reduce_add_unordered(a) }
}
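// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// The plain (non-widening) reductions keep the element type, so integer sums
// wrap like scalar `wrapping_add`:
#[cfg(all(test, target_arch = "aarch64"))]
mod vaddv_usage_sketch {
    use super::*;

    #[test]
    fn add_across_all_lanes() {
        unsafe {
            let v = vld1q_u32([1u32, 2, 3, 4].as_ptr());
            assert_eq!(vaddvq_u32(v), 10);
            // Sixteen lanes of 100i8 wrap: 1600 mod 256 = 64.
            assert_eq!(vaddvq_s8(vdupq_n_s8(100)), 64);
        }
    }
}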
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vamax_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f32"
        )]
        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vamaxq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f64"
        )]
        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vamaxq_f64(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(test, assert_instr(nop))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vaminq_f64(a, b) }
}
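// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// Per Arm's description, FAMAX/FAMIN compare *magnitudes*: each output lane is
// max(|a|, |b|) (respectively min), so the result is non-negative. Hardware
// with FEAT_FAMINMAX is still rare, so this only compiles when the target
// already enables the feature.
#[cfg(all(test, target_arch = "aarch64", target_feature = "faminmax"))]
mod famax_usage_sketch {
    use super::*;

    #[test]
    fn famax_picks_the_larger_magnitude() {
        unsafe {
            let r = vamax_f32(vdup_n_f32(-8.0), vdup_n_f32(3.0));
            assert_eq!(vget_lane_f32::<0>(r), 8.0);
        }
    }
}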
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vbcaxq_u64(a, b, c) }
}
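// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// Per Arm's pseudocode, BCAX computes `a ^ (b & !c)`: clear the bits of `b`
// selected by `c`, then exclusive-OR into `a`. A quick scalar cross-check,
// compiled only when the target enables the `sha3` feature:
#[cfg(all(test, target_arch = "aarch64", target_feature = "sha3"))]
mod bcax_usage_sketch {
    use super::*;

    #[test]
    fn bcax_matches_a_scalar_model() {
        unsafe {
            let (a, b, c) = (
                0x0123_4567_89ab_cdefu64,
                0xff00_ff00_ff00_ff00,
                0x0f0f_0f0f_0f0f_0f0f,
            );
            let r = vbcaxq_u64(vdupq_n_u64(a), vdupq_n_u64(b), vdupq_n_u64(c));
            assert_eq!(vgetq_lane_u64::<0>(r), a ^ (b & !c));
        }
    }
}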
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcaddq_rot90_f64(a, b) }
}
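// --- Illustrative usage (editorial sketch; not emitted by stdarch-gen-arm) ---
// Lanes are (real, imaginary) pairs. Per Arm's pseudocode the #90 rotation
// adds `b` pre-multiplied by +i, i.e. out = (a.re - b.im, a.im + b.re), while
// #270 uses -i. Gated on the `fcma` target feature:
#[cfg(all(test, target_arch = "aarch64", target_feature = "fcma"))]
mod fcadd_usage_sketch {
    use super::*;

    #[test]
    fn rot90_adds_i_times_b() {
        unsafe {
            let a = vld1_f32([1.0f32, 2.0].as_ptr()); // 1 + 2i
            let b = vld1_f32([10.0f32, 20.0].as_ptr()); // 10 + 20i
            let r = vcadd_rot90_f32(a, b); // expected: (1 - 20) + (2 + 10)i
            assert_eq!(vget_lane_f32::<0>(r), -19.0);
            assert_eq!(vget_lane_f32::<1>(r), 12.0);
        }
    }
}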
1108#[doc = "Floating-point absolute compare greater than or equal"]
1109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
1110#[inline]
1111#[target_feature(enable = "neon")]
1112#[cfg_attr(test, assert_instr(facge))]
1113#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1114pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1115    unsafe extern "unadjusted" {
1116        #[cfg_attr(
1117            any(target_arch = "aarch64", target_arch = "arm64ec"),
1118            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
1119        )]
1120        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
1121    }
1122    unsafe { _vcage_f64(a, b) }
1123}
1124#[doc = "Floating-point absolute compare greater than or equal"]
1125#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
1126#[inline]
1127#[target_feature(enable = "neon")]
1128#[cfg_attr(test, assert_instr(facge))]
1129#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1130pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1131    unsafe extern "unadjusted" {
1132        #[cfg_attr(
1133            any(target_arch = "aarch64", target_arch = "arm64ec"),
1134            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
1135        )]
1136        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
1137    }
1138    unsafe { _vcageq_f64(a, b) }
1139}
1140#[doc = "Floating-point absolute compare greater than or equal"]
1141#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
1142#[inline]
1143#[target_feature(enable = "neon")]
1144#[cfg_attr(test, assert_instr(facge))]
1145#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1146pub fn vcaged_f64(a: f64, b: f64) -> u64 {
1147    unsafe extern "unadjusted" {
1148        #[cfg_attr(
1149            any(target_arch = "aarch64", target_arch = "arm64ec"),
1150            link_name = "llvm.aarch64.neon.facge.i64.f64"
1151        )]
1152        fn _vcaged_f64(a: f64, b: f64) -> u64;
1153    }
1154    unsafe { _vcaged_f64(a, b) }
1155}
1156#[doc = "Floating-point absolute compare greater than or equal"]
1157#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
1158#[inline]
1159#[target_feature(enable = "neon")]
1160#[cfg_attr(test, assert_instr(facge))]
1161#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1162pub fn vcages_f32(a: f32, b: f32) -> u32 {
1163    unsafe extern "unadjusted" {
1164        #[cfg_attr(
1165            any(target_arch = "aarch64", target_arch = "arm64ec"),
1166            link_name = "llvm.aarch64.neon.facge.i32.f32"
1167        )]
1168        fn _vcages_f32(a: f32, b: f32) -> u32;
1169    }
1170    unsafe { _vcages_f32(a, b) }
1171}
1172#[doc = "Floating-point absolute compare greater than or equal"]
1173#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
1174#[inline]
1175#[cfg_attr(test, assert_instr(facge))]
1176#[target_feature(enable = "neon,fp16")]
1177#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1178pub fn vcageh_f16(a: f16, b: f16) -> u16 {
1179    unsafe extern "unadjusted" {
1180        #[cfg_attr(
1181            any(target_arch = "aarch64", target_arch = "arm64ec"),
1182            link_name = "llvm.aarch64.neon.facge.i32.f16"
1183        )]
1184        fn _vcageh_f16(a: f16, b: f16) -> i32;
1185    }
1186    unsafe { _vcageh_f16(a, b) as u16 }
1187}
1188#[doc = "Floating-point absolute compare greater than"]
1189#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
1190#[inline]
1191#[target_feature(enable = "neon")]
1192#[cfg_attr(test, assert_instr(facgt))]
1193#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1194pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
1195    unsafe extern "unadjusted" {
1196        #[cfg_attr(
1197            any(target_arch = "aarch64", target_arch = "arm64ec"),
1198            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
1199        )]
1200        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
1201    }
1202    unsafe { _vcagt_f64(a, b) }
1203}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    vcage_f64(b, a)
}
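// Editorial note (not emitted by `stdarch-gen-arm`): AArch64 has no FACLE
// encoding, so "absolute less than or equal" is implemented by swapping the
// operands of `vcage_f64` (|a| <= |b| iff |b| >= |a|), which is why the test
// asserts `facge`. Illustrative check:
//
// ```
// let a = vdup_n_f64(1.0);
// let b = vdup_n_f64(-1.5);
// // |1.0| <= |-1.5|, so the lane is all ones.
// assert_eq!(vget_lane_u64::<0>(vcale_f64(a, b)), u64::MAX);
// ```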
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    vcagth_f16(b, a)
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
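// Editorial note (not emitted by `stdarch-gen-arm`): the comparison
// intrinsics return a lane mask, not a boolean: each lane is all ones on a
// match and all zeros otherwise. Sketch, assuming `neon` is available:
//
// ```
// let a = vdup_n_f64(4.0);
// assert_eq!(vget_lane_u64::<0>(vceq_f64(a, a)), u64::MAX);
// // NaN never compares equal, not even to itself.
// let n = vdup_n_f64(f64::NAN);
// assert_eq!(vget_lane_u64::<0>(vceq_f64(n, n)), 0);
// ```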
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
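// Editorial note (not emitted by `stdarch-gen-arm`): the scalar `d`-suffixed
// forms above reuse the single-lane vector comparison: the `i64`/`u64` is
// transmuted to a one-lane vector, compared, and the resulting mask lane is
// transmuted back, so the result is always `u64::MAX` or `0`:
//
// ```
// assert_eq!(vceqd_s64(7, 7), u64::MAX);
// assert_eq!(vceqd_u64(7, 8), 0);
// ```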
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
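// Editorial note (not emitted by `stdarch-gen-arm`): the floating-point
// `vceqz*` family compares numerically against zero, so `-0.0` matches (it
// equals `+0.0` under IEEE 754) while NaN does not:
//
// ```
// let m = vceqz_f32(vdup_n_f32(-0.0));
// assert_eq!(vget_lane_u32::<0>(m), u32::MAX);
// let n = vceqz_f32(vdup_n_f32(f32::NAN));
// assert_eq!(vget_lane_u32::<0>(n), 0);
// ```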
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
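// Editorial note (not emitted by `stdarch-gen-arm`): the scalar zero tests
// follow the same all-ones/all-zeros mask convention:
//
// ```
// assert_eq!(vceqzd_u64(0), u64::MAX);
// assert_eq!(vceqzd_s64(-1), 0);
// ```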
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vceqzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
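// Editorial note (not emitted by `stdarch-gen-arm`): signed and unsigned
// greater-than-or-equal lower to different instructions (`cmge` vs `cmhs`),
// and the same bit pattern can compare differently under the two views.
// Sketch using the scalar forms defined further below:
//
// ```
// // As i64, -1 < 0; the same bits viewed as u64 are u64::MAX >= 0.
// assert_eq!(vcged_s64(-1, 0), 0);
// assert_eq!(vcged_u64(u64::MAX, 0), u64::MAX);
// ```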
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
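// Editorial note (not emitted by `stdarch-gen-arm`): `vcgez*` tests
// greater-than-or-equal against floating-point zero, so `-0.0` passes (it
// equals `+0.0`) while NaN lanes produce zero. Using the scalar form defined
// further below:
//
// ```
// assert_eq!(vcgezd_f64(-0.0), u64::MAX);
// assert_eq!(vcgezd_f64(f64::NAN), 0);
// ```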
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
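// Editorial note (not emitted by `stdarch-gen-arm`): unlike `vcgez*`, the
// strict `vcgtz*` test rejects both zeros, since `-0.0 > 0.0` is false.
// Sketch using the scalar form defined further below:
//
// ```
// assert_eq!(vcgtzs_f32(-0.0), 0);
// assert_eq!(vcgtzs_f32(f32::MIN_POSITIVE), u32::MAX);
// ```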
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcgtzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
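// Editorial note (not emitted by `stdarch-gen-arm`): AArch64 only provides
// CMLE/FCMLE against zero; a register-register less-than-or-equal is encoded
// as CMGE/CMHS/FCMGE with the operands swapped, which is why the `vcle*`
// intrinsics above assert `fcmge`, `cmge`, and `cmhs`. Behaviourally they are
// plain `a <= b` lane masks:
//
// ```
// assert_eq!(vcled_s64(-2, -1), u64::MAX);
// assert_eq!(vcled_u64(2, 1), 0);
// ```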
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
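// Editorial note (not emitted by `stdarch-gen-arm`): `vclez*` accepts both
// zeros and all negative values, and again rejects NaN. Using the scalar form
// defined further below:
//
// ```
// assert_eq!(vclezd_f64(-0.0), u64::MAX);
// assert_eq!(vclezd_f64(f64::NAN), 0);
// ```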
2429#[doc = "Compare signed less than or equal to zero"]
2430#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
2431#[inline]
2432#[target_feature(enable = "neon")]
2433#[cfg_attr(test, assert_instr(cmle))]
2434#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2435pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
2436    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2437    unsafe { simd_le(a, transmute(b)) }
2438}
2439#[doc = "Compare signed less than or equal to zero"]
2440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
2441#[inline]
2442#[target_feature(enable = "neon")]
2443#[cfg_attr(test, assert_instr(cmle))]
2444#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2445pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
2446    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2447    unsafe { simd_le(a, transmute(b)) }
2448}
2449#[doc = "Compare signed less than or equal to zero"]
2450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
2451#[inline]
2452#[target_feature(enable = "neon")]
2453#[cfg_attr(test, assert_instr(cmle))]
2454#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2455pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
2456    let b: i16x4 = i16x4::new(0, 0, 0, 0);
2457    unsafe { simd_le(a, transmute(b)) }
2458}
2459#[doc = "Compare signed less than or equal to zero"]
2460#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
2461#[inline]
2462#[target_feature(enable = "neon")]
2463#[cfg_attr(test, assert_instr(cmle))]
2464#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2465pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
2466    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2467    unsafe { simd_le(a, transmute(b)) }
2468}
2469#[doc = "Compare signed less than or equal to zero"]
2470#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
2471#[inline]
2472#[target_feature(enable = "neon")]
2473#[cfg_attr(test, assert_instr(cmle))]
2474#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2475pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
2476    let b: i32x2 = i32x2::new(0, 0);
2477    unsafe { simd_le(a, transmute(b)) }
2478}
2479#[doc = "Compare signed less than or equal to zero"]
2480#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
2481#[inline]
2482#[target_feature(enable = "neon")]
2483#[cfg_attr(test, assert_instr(cmle))]
2484#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2485pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
2486    let b: i32x4 = i32x4::new(0, 0, 0, 0);
2487    unsafe { simd_le(a, transmute(b)) }
2488}
2489#[doc = "Compare signed less than or equal to zero"]
2490#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
2491#[inline]
2492#[target_feature(enable = "neon")]
2493#[cfg_attr(test, assert_instr(cmle))]
2494#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2495pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
2496    let b: i64x1 = i64x1::new(0);
2497    unsafe { simd_le(a, transmute(b)) }
2498}
2499#[doc = "Compare signed less than or equal to zero"]
2500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
2501#[inline]
2502#[target_feature(enable = "neon")]
2503#[cfg_attr(test, assert_instr(cmle))]
2504#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2505pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
2506    let b: i64x2 = i64x2::new(0, 0);
2507    unsafe { simd_le(a, transmute(b)) }
2508}
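// --- Editorial usage sketch; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. The signed integer forms
// follow the same mask convention; for i8 lanes each mask element is 0xFF
// or 0x00:
#[cfg(test)]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vclez_s8_usage_sketch() {
    assert_eq!(vget_lane_u8::<0>(vclez_s8(vdup_n_s8(-5))), u8::MAX); // -5 <= 0
    assert_eq!(vget_lane_u8::<0>(vclez_s8(vdup_n_s8(7))), 0); // 7 > 0
}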
2509#[doc = "Floating-point compare less than or equal to zero"]
2510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
2511#[inline]
2512#[target_feature(enable = "neon")]
2513#[cfg_attr(test, assert_instr(fcmp))]
2514#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2515pub fn vclezd_f64(a: f64) -> u64 {
2516    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
2517}
2518#[doc = "Floating-point compare less than or equal to zero"]
2519#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
2520#[inline]
2521#[target_feature(enable = "neon")]
2522#[cfg_attr(test, assert_instr(fcmp))]
2523#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2524pub fn vclezs_f32(a: f32) -> u32 {
2525    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
2526}
2527#[doc = "Compare less than or equal to zero"]
2528#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
2529#[inline]
2530#[target_feature(enable = "neon")]
2531#[cfg_attr(test, assert_instr(cmp))]
2532#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2533pub fn vclezd_s64(a: i64) -> u64 {
2534    unsafe { transmute(vclez_s64(transmute(a))) }
2535}
2536#[doc = "Floating-point compare less than or equal to zero"]
2537#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
2538#[inline]
2539#[cfg_attr(test, assert_instr(fcmp))]
2540#[target_feature(enable = "neon,fp16")]
2541#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2542pub fn vclezh_f16(a: f16) -> u16 {
2543    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
2544}
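// --- Editorial usage sketch; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. The d/s/h-suffixed forms
// above are the scalar counterparts: they compare a single value and return
// 0 or the all-ones integer. Note that -0.0 <= 0.0 holds under IEEE 754:
#[cfg(test)]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vclezd_f64_usage_sketch() {
    assert_eq!(vclezd_f64(-0.0), u64::MAX); // -0.0 compares equal to 0.0
    assert_eq!(vclezd_f64(f64::MIN_POSITIVE), 0);
    assert_eq!(vclezd_s64(0), u64::MAX);
}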
2545#[doc = "Floating-point compare less than"]
2546#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
2547#[inline]
2548#[target_feature(enable = "neon")]
2549#[cfg_attr(test, assert_instr(fcmgt))]
2550#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2551pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
2552    unsafe { simd_lt(a, b) }
2553}
2554#[doc = "Floating-point compare less than"]
2555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
2556#[inline]
2557#[target_feature(enable = "neon")]
2558#[cfg_attr(test, assert_instr(fcmgt))]
2559#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2560pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
2561    unsafe { simd_lt(a, b) }
2562}
2563#[doc = "Compare signed less than"]
2564#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
2565#[inline]
2566#[target_feature(enable = "neon")]
2567#[cfg_attr(test, assert_instr(cmgt))]
2568#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2569pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
2570    unsafe { simd_lt(a, b) }
2571}
2572#[doc = "Compare signed less than"]
2573#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
2574#[inline]
2575#[target_feature(enable = "neon")]
2576#[cfg_attr(test, assert_instr(cmgt))]
2577#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2578pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
2579    unsafe { simd_lt(a, b) }
2580}
2581#[doc = "Compare unsigned less than"]
2582#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
2583#[inline]
2584#[target_feature(enable = "neon")]
2585#[cfg_attr(test, assert_instr(cmhi))]
2586#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2587pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
2588    unsafe { simd_lt(a, b) }
2589}
2590#[doc = "Compare unsigned less than"]
2591#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
2592#[inline]
2593#[target_feature(enable = "neon")]
2594#[cfg_attr(test, assert_instr(cmhi))]
2595#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2596pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
2597    unsafe { simd_lt(a, b) }
2598}
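// --- Editorial usage sketch; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. vclt_* is the two-operand
// strict form: lane i of the result is all ones exactly when a[i] < b[i].
// For unsigned data the comparison is unsigned (note the cmhi instruction
// asserted above):
#[cfg(test)]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vclt_u64_usage_sketch() {
    let lt = vclt_u64(vdup_n_u64(1), vdup_n_u64(u64::MAX));
    assert_eq!(vget_lane_u64::<0>(lt), u64::MAX); // 1 < u64::MAX, unsigned
}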
2599#[doc = "Compare less than"]
2600#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
2601#[inline]
2602#[target_feature(enable = "neon")]
2603#[cfg_attr(test, assert_instr(cmp))]
2604#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2605pub fn vcltd_u64(a: u64, b: u64) -> u64 {
2606    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
2607}
2608#[doc = "Compare less than"]
2609#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
2610#[inline]
2611#[target_feature(enable = "neon")]
2612#[cfg_attr(test, assert_instr(cmp))]
2613#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2614pub fn vcltd_s64(a: i64, b: i64) -> u64 {
2615    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
2616}
2617#[doc = "Floating-point compare less than"]
2618#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
2619#[inline]
2620#[cfg_attr(test, assert_instr(fcmp))]
2621#[target_feature(enable = "neon,fp16")]
2622#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2623pub fn vclth_f16(a: f16, b: f16) -> u16 {
2624    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
2625}
2626#[doc = "Floating-point compare less than"]
2627#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
2628#[inline]
2629#[target_feature(enable = "neon")]
2630#[cfg_attr(test, assert_instr(fcmp))]
2631#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2632pub fn vclts_f32(a: f32, b: f32) -> u32 {
2633    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
2634}
2635#[doc = "Floating-point compare less than"]
2636#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
2637#[inline]
2638#[target_feature(enable = "neon")]
2639#[cfg_attr(test, assert_instr(fcmp))]
2640#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2641pub fn vcltd_f64(a: f64, b: f64) -> u64 {
2642    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
2643}
2644#[doc = "Floating-point compare less than zero"]
2645#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
2646#[inline]
2647#[target_feature(enable = "neon")]
2648#[cfg_attr(test, assert_instr(fcmlt))]
2649#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2650pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
2651    let b: f32x2 = f32x2::new(0.0, 0.0);
2652    unsafe { simd_lt(a, transmute(b)) }
2653}
2654#[doc = "Floating-point compare less than zero"]
2655#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
2656#[inline]
2657#[target_feature(enable = "neon")]
2658#[cfg_attr(test, assert_instr(fcmlt))]
2659#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2660pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
2661    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
2662    unsafe { simd_lt(a, transmute(b)) }
2663}
2664#[doc = "Floating-point compare less than zero"]
2665#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
2666#[inline]
2667#[target_feature(enable = "neon")]
2668#[cfg_attr(test, assert_instr(fcmlt))]
2669#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2670pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
2671    let b: f64 = 0.0;
2672    unsafe { simd_lt(a, transmute(b)) }
2673}
2674#[doc = "Floating-point compare less than zero"]
2675#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
2676#[inline]
2677#[target_feature(enable = "neon")]
2678#[cfg_attr(test, assert_instr(fcmlt))]
2679#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2680pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
2681    let b: f64x2 = f64x2::new(0.0, 0.0);
2682    unsafe { simd_lt(a, transmute(b)) }
2683}
2684#[doc = "Compare signed less than zero"]
2685#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
2686#[inline]
2687#[target_feature(enable = "neon")]
2688#[cfg_attr(test, assert_instr(cmlt))]
2689#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2690pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
2691    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2692    unsafe { simd_lt(a, transmute(b)) }
2693}
2694#[doc = "Compare signed less than zero"]
2695#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
2696#[inline]
2697#[target_feature(enable = "neon")]
2698#[cfg_attr(test, assert_instr(cmlt))]
2699#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2700pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
2701    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
2702    unsafe { simd_lt(a, transmute(b)) }
2703}
2704#[doc = "Compare signed less than zero"]
2705#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
2706#[inline]
2707#[target_feature(enable = "neon")]
2708#[cfg_attr(test, assert_instr(cmlt))]
2709#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2710pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
2711    let b: i16x4 = i16x4::new(0, 0, 0, 0);
2712    unsafe { simd_lt(a, transmute(b)) }
2713}
2714#[doc = "Compare signed less than zero"]
2715#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
2716#[inline]
2717#[target_feature(enable = "neon")]
2718#[cfg_attr(test, assert_instr(cmlt))]
2719#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2720pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
2721    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
2722    unsafe { simd_lt(a, transmute(b)) }
2723}
2724#[doc = "Compare signed less than zero"]
2725#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
2726#[inline]
2727#[target_feature(enable = "neon")]
2728#[cfg_attr(test, assert_instr(cmlt))]
2729#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2730pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
2731    let b: i32x2 = i32x2::new(0, 0);
2732    unsafe { simd_lt(a, transmute(b)) }
2733}
2734#[doc = "Compare signed less than zero"]
2735#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
2736#[inline]
2737#[target_feature(enable = "neon")]
2738#[cfg_attr(test, assert_instr(cmlt))]
2739#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2740pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
2741    let b: i32x4 = i32x4::new(0, 0, 0, 0);
2742    unsafe { simd_lt(a, transmute(b)) }
2743}
2744#[doc = "Compare signed less than zero"]
2745#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
2746#[inline]
2747#[target_feature(enable = "neon")]
2748#[cfg_attr(test, assert_instr(cmlt))]
2749#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2750pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
2751    let b: i64x1 = i64x1::new(0);
2752    unsafe { simd_lt(a, transmute(b)) }
2753}
2754#[doc = "Compare signed less than zero"]
2755#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
2756#[inline]
2757#[target_feature(enable = "neon")]
2758#[cfg_attr(test, assert_instr(cmlt))]
2759#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2760pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
2761    let b: i64x2 = i64x2::new(0, 0);
2762    unsafe { simd_lt(a, transmute(b)) }
2763}
2764#[doc = "Floating-point compare less than zero"]
2765#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
2766#[inline]
2767#[target_feature(enable = "neon")]
2768#[cfg_attr(test, assert_instr(fcmp))]
2769#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2770pub fn vcltzd_f64(a: f64) -> u64 {
2771    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
2772}
2773#[doc = "Floating-point compare less than zero"]
2774#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
2775#[inline]
2776#[target_feature(enable = "neon")]
2777#[cfg_attr(test, assert_instr(fcmp))]
2778#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2779pub fn vcltzs_f32(a: f32) -> u32 {
2780    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
2781}
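// --- Editorial usage sketch; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. Unlike vclez, the strict
// vcltz forms reject zero, including negative zero: IEEE 754 compares
// -0.0 < 0.0 as false.
#[cfg(test)]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vcltzs_f32_usage_sketch() {
    assert_eq!(vcltzs_f32(-1.0), u32::MAX);
    assert_eq!(vcltzs_f32(-0.0), 0); // -0.0 compares equal to 0.0
}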
2782#[doc = "Compare less than zero"]
2783#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
2784#[inline]
2785#[target_feature(enable = "neon")]
2786#[cfg_attr(test, assert_instr(asr))]
2787#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2788pub fn vcltzd_s64(a: i64) -> u64 {
2789    unsafe { transmute(vcltz_s64(transmute(a))) }
2790}
2791#[doc = "Floating-point compare less than zero"]
2792#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
2793#[inline]
2794#[cfg_attr(test, assert_instr(fcmp))]
2795#[target_feature(enable = "neon,fp16")]
2796#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2797pub fn vcltzh_f16(a: f16) -> u16 {
2798    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
2799}
2800#[doc = "Floating-point complex multiply accumulate"]
2801#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
2802#[inline]
2803#[target_feature(enable = "neon,fcma")]
2804#[target_feature(enable = "neon,fp16")]
2805#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2806#[cfg_attr(test, assert_instr(fcmla))]
2807pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
2808    unsafe extern "unadjusted" {
2809        #[cfg_attr(
2810            any(target_arch = "aarch64", target_arch = "arm64ec"),
2811            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
2812        )]
2813        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
2814    }
2815    unsafe { _vcmla_f16(a, b, c) }
2816}
2817#[doc = "Floating-point complex multiply accumulate"]
2818#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
2819#[inline]
2820#[target_feature(enable = "neon,fcma")]
2821#[target_feature(enable = "neon,fp16")]
2822#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2823#[cfg_attr(test, assert_instr(fcmla))]
2824pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
2825    unsafe extern "unadjusted" {
2826        #[cfg_attr(
2827            any(target_arch = "aarch64", target_arch = "arm64ec"),
2828            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
2829        )]
2830        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
2831    }
2832    unsafe { _vcmlaq_f16(a, b, c) }
2833}
2834#[doc = "Floating-point complex multiply accumulate"]
2835#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
2836#[inline]
2837#[target_feature(enable = "neon,fcma")]
2838#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2839#[cfg_attr(test, assert_instr(fcmla))]
2840pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
2841    unsafe extern "unadjusted" {
2842        #[cfg_attr(
2843            any(target_arch = "aarch64", target_arch = "arm64ec"),
2844            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
2845        )]
2846        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
2847    }
2848    unsafe { _vcmla_f32(a, b, c) }
2849}
2850#[doc = "Floating-point complex multiply accumulate"]
2851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
2852#[inline]
2853#[target_feature(enable = "neon,fcma")]
2854#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2855#[cfg_attr(test, assert_instr(fcmla))]
2856pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
2857    unsafe extern "unadjusted" {
2858        #[cfg_attr(
2859            any(target_arch = "aarch64", target_arch = "arm64ec"),
2860            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
2861        )]
2862        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
2863    }
2864    unsafe { _vcmlaq_f32(a, b, c) }
2865}
2866#[doc = "Floating-point complex multiply accumulate"]
2867#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
2868#[inline]
2869#[target_feature(enable = "neon,fcma")]
2870#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2871#[cfg_attr(test, assert_instr(fcmla))]
2872pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
2873    unsafe extern "unadjusted" {
2874        #[cfg_attr(
2875            any(target_arch = "aarch64", target_arch = "arm64ec"),
2876            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
2877        )]
2878        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
2879    }
2880    unsafe { _vcmlaq_f64(a, b, c) }
2881}
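// --- Editorial note; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. Per Arm's FCMLA
// definition, the rotation-0 forms above treat consecutive lanes as complex
// (re, im) pairs and accumulate only the real-part products:
//   out.re = a.re + b.re * c.re
//   out.im = a.im + b.re * c.im
// The rot180/rot270/rot90 variants below supply the remaining partial
// products; see the sketches after them. A minimal check (needs FEAT_FCMA):
#[cfg(test)]
#[target_feature(enable = "neon,fcma")]
#[allow(dead_code)]
fn vcmla_f32_usage_sketch() {
    let a = vdup_n_f32(1.0); // 1 + 1i
    let b = vset_lane_f32::<1>(3.0, vdup_n_f32(2.0)); // 2 + 3i
    let c = vset_lane_f32::<1>(7.0, vdup_n_f32(5.0)); // 5 + 7i
    let r = vcmla_f32(a, b, c);
    assert_eq!(vget_lane_f32::<0>(r), 1.0 + 2.0 * 5.0); // 11.0
    assert_eq!(vget_lane_f32::<1>(r), 1.0 + 2.0 * 7.0); // 15.0
}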
2882#[doc = "Floating-point complex multiply accumulate"]
2883#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
2884#[inline]
2885#[target_feature(enable = "neon,fcma")]
2886#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
2887#[rustc_legacy_const_generics(3)]
2888#[target_feature(enable = "neon,fp16")]
2889#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2890pub fn vcmla_lane_f16<const LANE: i32>(
2891    a: float16x4_t,
2892    b: float16x4_t,
2893    c: float16x4_t,
2894) -> float16x4_t {
2895    static_assert_uimm_bits!(LANE, 1);
2896    unsafe {
2897        let c: float16x4_t = simd_shuffle!(
2898            c,
2899            c,
2900            [
2901                2 * LANE as u32,
2902                2 * LANE as u32 + 1,
2903                2 * LANE as u32,
2904                2 * LANE as u32 + 1
2905            ]
2906        );
2907        vcmla_f16(a, b, c)
2908    }
2909}
2910#[doc = "Floating-point complex multiply accumulate"]
2911#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
2912#[inline]
2913#[target_feature(enable = "neon,fcma")]
2914#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
2915#[rustc_legacy_const_generics(3)]
2916#[target_feature(enable = "neon,fp16")]
2917#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2918pub fn vcmlaq_lane_f16<const LANE: i32>(
2919    a: float16x8_t,
2920    b: float16x8_t,
2921    c: float16x4_t,
2922) -> float16x8_t {
2923    static_assert_uimm_bits!(LANE, 1);
2924    unsafe {
2925        let c: float16x8_t = simd_shuffle!(
2926            c,
2927            c,
2928            [
2929                2 * LANE as u32,
2930                2 * LANE as u32 + 1,
2931                2 * LANE as u32,
2932                2 * LANE as u32 + 1,
2933                2 * LANE as u32,
2934                2 * LANE as u32 + 1,
2935                2 * LANE as u32,
2936                2 * LANE as u32 + 1
2937            ]
2938        );
2939        vcmlaq_f16(a, b, c)
2940    }
2941}
2942#[doc = "Floating-point complex multiply accumulate"]
2943#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
2944#[inline]
2945#[target_feature(enable = "neon,fcma")]
2946#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
2947#[rustc_legacy_const_generics(3)]
2948#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2949pub fn vcmla_lane_f32<const LANE: i32>(
2950    a: float32x2_t,
2951    b: float32x2_t,
2952    c: float32x2_t,
2953) -> float32x2_t {
2954    static_assert!(LANE == 0);
2955    unsafe {
2956        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
2957        vcmla_f32(a, b, c)
2958    }
2959}
2960#[doc = "Floating-point complex multiply accumulate"]
2961#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
2962#[inline]
2963#[target_feature(enable = "neon,fcma")]
2964#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
2965#[rustc_legacy_const_generics(3)]
2966#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
2967pub fn vcmlaq_lane_f32<const LANE: i32>(
2968    a: float32x4_t,
2969    b: float32x4_t,
2970    c: float32x2_t,
2971) -> float32x4_t {
2972    static_assert!(LANE == 0);
2973    unsafe {
2974        let c: float32x4_t = simd_shuffle!(
2975            c,
2976            c,
2977            [
2978                2 * LANE as u32,
2979                2 * LANE as u32 + 1,
2980                2 * LANE as u32,
2981                2 * LANE as u32 + 1
2982            ]
2983        );
2984        vcmlaq_f32(a, b, c)
2985    }
2986}
2987#[doc = "Floating-point complex multiply accumulate"]
2988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
2989#[inline]
2990#[target_feature(enable = "neon,fcma")]
2991#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
2992#[rustc_legacy_const_generics(3)]
2993#[target_feature(enable = "neon,fp16")]
2994#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
2995pub fn vcmla_laneq_f16<const LANE: i32>(
2996    a: float16x4_t,
2997    b: float16x4_t,
2998    c: float16x8_t,
2999) -> float16x4_t {
3000    static_assert_uimm_bits!(LANE, 2);
3001    unsafe {
3002        let c: float16x4_t = simd_shuffle!(
3003            c,
3004            c,
3005            [
3006                2 * LANE as u32,
3007                2 * LANE as u32 + 1,
3008                2 * LANE as u32,
3009                2 * LANE as u32 + 1
3010            ]
3011        );
3012        vcmla_f16(a, b, c)
3013    }
3014}
3015#[doc = "Floating-point complex multiply accumulate"]
3016#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
3017#[inline]
3018#[target_feature(enable = "neon,fcma")]
3019#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3020#[rustc_legacy_const_generics(3)]
3021#[target_feature(enable = "neon,fp16")]
3022#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3023pub fn vcmlaq_laneq_f16<const LANE: i32>(
3024    a: float16x8_t,
3025    b: float16x8_t,
3026    c: float16x8_t,
3027) -> float16x8_t {
3028    static_assert_uimm_bits!(LANE, 2);
3029    unsafe {
3030        let c: float16x8_t = simd_shuffle!(
3031            c,
3032            c,
3033            [
3034                2 * LANE as u32,
3035                2 * LANE as u32 + 1,
3036                2 * LANE as u32,
3037                2 * LANE as u32 + 1,
3038                2 * LANE as u32,
3039                2 * LANE as u32 + 1,
3040                2 * LANE as u32,
3041                2 * LANE as u32 + 1
3042            ]
3043        );
3044        vcmlaq_f16(a, b, c)
3045    }
3046}
3047#[doc = "Floating-point complex multiply accumulate"]
3048#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
3049#[inline]
3050#[target_feature(enable = "neon,fcma")]
3051#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3052#[rustc_legacy_const_generics(3)]
3053#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3054pub fn vcmla_laneq_f32<const LANE: i32>(
3055    a: float32x2_t,
3056    b: float32x2_t,
3057    c: float32x4_t,
3058) -> float32x2_t {
3059    static_assert_uimm_bits!(LANE, 1);
3060    unsafe {
3061        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3062        vcmla_f32(a, b, c)
3063    }
3064}
3065#[doc = "Floating-point complex multiply accumulate"]
3066#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
3067#[inline]
3068#[target_feature(enable = "neon,fcma")]
3069#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3070#[rustc_legacy_const_generics(3)]
3071#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3072pub fn vcmlaq_laneq_f32<const LANE: i32>(
3073    a: float32x4_t,
3074    b: float32x4_t,
3075    c: float32x4_t,
3076) -> float32x4_t {
3077    static_assert_uimm_bits!(LANE, 1);
3078    unsafe {
3079        let c: float32x4_t = simd_shuffle!(
3080            c,
3081            c,
3082            [
3083                2 * LANE as u32,
3084                2 * LANE as u32 + 1,
3085                2 * LANE as u32,
3086                2 * LANE as u32 + 1
3087            ]
3088        );
3089        vcmlaq_f32(a, b, c)
3090    }
3091}
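// --- Editorial note; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. In the lane/laneq
// variants above, LANE indexes a complex (re, im) *pair* of `c`, which is
// why the shuffles use indices 2*LANE and 2*LANE + 1; the selected pair is
// then broadcast to every complex element. A minimal check (needs FEAT_FCMA;
// vsetq_lane_f32 is used here only to build a distinct-lane input):
#[cfg(test)]
#[target_feature(enable = "neon,fcma")]
#[allow(dead_code)]
fn vcmla_laneq_f32_usage_sketch() {
    let a = vdup_n_f32(0.0);
    let b = vdup_n_f32(1.0); // b = 1 + 1i
    // c holds two pairs: (1.0, 2.0) and (3.0, 4.0).
    let c = vsetq_lane_f32::<3>(
        4.0,
        vsetq_lane_f32::<2>(3.0, vsetq_lane_f32::<1>(2.0, vdupq_n_f32(1.0))),
    );
    // LANE = 1 selects the pair (3.0, 4.0); rot0 then gives
    // re = b.re * 3.0 and im = b.re * 4.0.
    let r = vcmla_laneq_f32::<1>(a, b, c);
    assert_eq!(vget_lane_f32::<0>(r), 3.0);
    assert_eq!(vget_lane_f32::<1>(r), 4.0);
}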
3092#[doc = "Floating-point complex multiply accumulate"]
3093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
3094#[inline]
3095#[target_feature(enable = "neon,fcma")]
3096#[target_feature(enable = "neon,fp16")]
3097#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3098#[cfg_attr(test, assert_instr(fcmla))]
3099pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
3100    unsafe extern "unadjusted" {
3101        #[cfg_attr(
3102            any(target_arch = "aarch64", target_arch = "arm64ec"),
3103            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
3104        )]
3105        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
3106    }
3107    unsafe { _vcmla_rot180_f16(a, b, c) }
3108}
3109#[doc = "Floating-point complex multiply accumulate"]
3110#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
3111#[inline]
3112#[target_feature(enable = "neon,fcma")]
3113#[target_feature(enable = "neon,fp16")]
3114#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3115#[cfg_attr(test, assert_instr(fcmla))]
3116pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
3117    unsafe extern "unadjusted" {
3118        #[cfg_attr(
3119            any(target_arch = "aarch64", target_arch = "arm64ec"),
3120            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
3121        )]
3122        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
3123    }
3124    unsafe { _vcmlaq_rot180_f16(a, b, c) }
3125}
3126#[doc = "Floating-point complex multiply accumulate"]
3127#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
3128#[inline]
3129#[target_feature(enable = "neon,fcma")]
3130#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3131#[cfg_attr(test, assert_instr(fcmla))]
3132pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
3133    unsafe extern "unadjusted" {
3134        #[cfg_attr(
3135            any(target_arch = "aarch64", target_arch = "arm64ec"),
3136            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
3137        )]
3138        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
3139    }
3140    unsafe { _vcmla_rot180_f32(a, b, c) }
3141}
3142#[doc = "Floating-point complex multiply accumulate"]
3143#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
3144#[inline]
3145#[target_feature(enable = "neon,fcma")]
3146#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3147#[cfg_attr(test, assert_instr(fcmla))]
3148pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
3149    unsafe extern "unadjusted" {
3150        #[cfg_attr(
3151            any(target_arch = "aarch64", target_arch = "arm64ec"),
3152            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
3153        )]
3154        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
3155    }
3156    unsafe { _vcmlaq_rot180_f32(a, b, c) }
3157}
3158#[doc = "Floating-point complex multiply accumulate"]
3159#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
3160#[inline]
3161#[target_feature(enable = "neon,fcma")]
3162#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3163#[cfg_attr(test, assert_instr(fcmla))]
3164pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
3165    unsafe extern "unadjusted" {
3166        #[cfg_attr(
3167            any(target_arch = "aarch64", target_arch = "arm64ec"),
3168            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
3169        )]
3170        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
3171    }
3172    unsafe { _vcmlaq_rot180_f64(a, b, c) }
3173}
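// --- Editorial note; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. Per Arm's FCMLA
// definition, the rot180 forms negate the real-part products of rot0:
//   out.re = a.re - b.re * c.re
//   out.im = a.im - b.re * c.im
// A minimal check (needs FEAT_FCMA):
#[cfg(test)]
#[target_feature(enable = "neon,fcma")]
#[allow(dead_code)]
fn vcmla_rot180_f32_usage_sketch() {
    let a = vdup_n_f32(10.0); // 10 + 10i
    let b = vset_lane_f32::<1>(3.0, vdup_n_f32(2.0)); // 2 + 3i
    let c = vset_lane_f32::<1>(7.0, vdup_n_f32(5.0)); // 5 + 7i
    let r = vcmla_rot180_f32(a, b, c);
    assert_eq!(vget_lane_f32::<0>(r), 10.0 - 2.0 * 5.0); // 0.0
    assert_eq!(vget_lane_f32::<1>(r), 10.0 - 2.0 * 7.0); // -4.0
}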
3174#[doc = "Floating-point complex multiply accumulate"]
3175#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
3176#[inline]
3177#[target_feature(enable = "neon,fcma")]
3178#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3179#[rustc_legacy_const_generics(3)]
3180#[target_feature(enable = "neon,fp16")]
3181#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3182pub fn vcmla_rot180_lane_f16<const LANE: i32>(
3183    a: float16x4_t,
3184    b: float16x4_t,
3185    c: float16x4_t,
3186) -> float16x4_t {
3187    static_assert_uimm_bits!(LANE, 1);
3188    unsafe {
3189        let c: float16x4_t = simd_shuffle!(
3190            c,
3191            c,
3192            [
3193                2 * LANE as u32,
3194                2 * LANE as u32 + 1,
3195                2 * LANE as u32,
3196                2 * LANE as u32 + 1
3197            ]
3198        );
3199        vcmla_rot180_f16(a, b, c)
3200    }
3201}
3202#[doc = "Floating-point complex multiply accumulate"]
3203#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
3204#[inline]
3205#[target_feature(enable = "neon,fcma")]
3206#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3207#[rustc_legacy_const_generics(3)]
3208#[target_feature(enable = "neon,fp16")]
3209#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3210pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
3211    a: float16x8_t,
3212    b: float16x8_t,
3213    c: float16x4_t,
3214) -> float16x8_t {
3215    static_assert_uimm_bits!(LANE, 1);
3216    unsafe {
3217        let c: float16x8_t = simd_shuffle!(
3218            c,
3219            c,
3220            [
3221                2 * LANE as u32,
3222                2 * LANE as u32 + 1,
3223                2 * LANE as u32,
3224                2 * LANE as u32 + 1,
3225                2 * LANE as u32,
3226                2 * LANE as u32 + 1,
3227                2 * LANE as u32,
3228                2 * LANE as u32 + 1
3229            ]
3230        );
3231        vcmlaq_rot180_f16(a, b, c)
3232    }
3233}
3234#[doc = "Floating-point complex multiply accumulate"]
3235#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
3236#[inline]
3237#[target_feature(enable = "neon,fcma")]
3238#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3239#[rustc_legacy_const_generics(3)]
3240#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3241pub fn vcmla_rot180_lane_f32<const LANE: i32>(
3242    a: float32x2_t,
3243    b: float32x2_t,
3244    c: float32x2_t,
3245) -> float32x2_t {
3246    static_assert!(LANE == 0);
3247    unsafe {
3248        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3249        vcmla_rot180_f32(a, b, c)
3250    }
3251}
3252#[doc = "Floating-point complex multiply accumulate"]
3253#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
3254#[inline]
3255#[target_feature(enable = "neon,fcma")]
3256#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3257#[rustc_legacy_const_generics(3)]
3258#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3259pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
3260    a: float32x4_t,
3261    b: float32x4_t,
3262    c: float32x2_t,
3263) -> float32x4_t {
3264    static_assert!(LANE == 0);
3265    unsafe {
3266        let c: float32x4_t = simd_shuffle!(
3267            c,
3268            c,
3269            [
3270                2 * LANE as u32,
3271                2 * LANE as u32 + 1,
3272                2 * LANE as u32,
3273                2 * LANE as u32 + 1
3274            ]
3275        );
3276        vcmlaq_rot180_f32(a, b, c)
3277    }
3278}
3279#[doc = "Floating-point complex multiply accumulate"]
3280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
3281#[inline]
3282#[target_feature(enable = "neon,fcma")]
3283#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3284#[rustc_legacy_const_generics(3)]
3285#[target_feature(enable = "neon,fp16")]
3286#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3287pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
3288    a: float16x4_t,
3289    b: float16x4_t,
3290    c: float16x8_t,
3291) -> float16x4_t {
3292    static_assert_uimm_bits!(LANE, 2);
3293    unsafe {
3294        let c: float16x4_t = simd_shuffle!(
3295            c,
3296            c,
3297            [
3298                2 * LANE as u32,
3299                2 * LANE as u32 + 1,
3300                2 * LANE as u32,
3301                2 * LANE as u32 + 1
3302            ]
3303        );
3304        vcmla_rot180_f16(a, b, c)
3305    }
3306}
3307#[doc = "Floating-point complex multiply accumulate"]
3308#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
3309#[inline]
3310#[target_feature(enable = "neon,fcma")]
3311#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3312#[rustc_legacy_const_generics(3)]
3313#[target_feature(enable = "neon,fp16")]
3314#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3315pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
3316    a: float16x8_t,
3317    b: float16x8_t,
3318    c: float16x8_t,
3319) -> float16x8_t {
3320    static_assert_uimm_bits!(LANE, 2);
3321    unsafe {
3322        let c: float16x8_t = simd_shuffle!(
3323            c,
3324            c,
3325            [
3326                2 * LANE as u32,
3327                2 * LANE as u32 + 1,
3328                2 * LANE as u32,
3329                2 * LANE as u32 + 1,
3330                2 * LANE as u32,
3331                2 * LANE as u32 + 1,
3332                2 * LANE as u32,
3333                2 * LANE as u32 + 1
3334            ]
3335        );
3336        vcmlaq_rot180_f16(a, b, c)
3337    }
3338}
3339#[doc = "Floating-point complex multiply accumulate"]
3340#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
3341#[inline]
3342#[target_feature(enable = "neon,fcma")]
3343#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3344#[rustc_legacy_const_generics(3)]
3345#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3346pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
3347    a: float32x2_t,
3348    b: float32x2_t,
3349    c: float32x4_t,
3350) -> float32x2_t {
3351    static_assert_uimm_bits!(LANE, 1);
3352    unsafe {
3353        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3354        vcmla_rot180_f32(a, b, c)
3355    }
3356}
3357#[doc = "Floating-point complex multiply accumulate"]
3358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
3359#[inline]
3360#[target_feature(enable = "neon,fcma")]
3361#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3362#[rustc_legacy_const_generics(3)]
3363#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3364pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
3365    a: float32x4_t,
3366    b: float32x4_t,
3367    c: float32x4_t,
3368) -> float32x4_t {
3369    static_assert_uimm_bits!(LANE, 1);
3370    unsafe {
3371        let c: float32x4_t = simd_shuffle!(
3372            c,
3373            c,
3374            [
3375                2 * LANE as u32,
3376                2 * LANE as u32 + 1,
3377                2 * LANE as u32,
3378                2 * LANE as u32 + 1
3379            ]
3380        );
3381        vcmlaq_rot180_f32(a, b, c)
3382    }
3383}
3384#[doc = "Floating-point complex multiply accumulate"]
3385#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
3386#[inline]
3387#[target_feature(enable = "neon,fcma")]
3388#[target_feature(enable = "neon,fp16")]
3389#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3390#[cfg_attr(test, assert_instr(fcmla))]
3391pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
3392    unsafe extern "unadjusted" {
3393        #[cfg_attr(
3394            any(target_arch = "aarch64", target_arch = "arm64ec"),
3395            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
3396        )]
3397        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
3398    }
3399    unsafe { _vcmla_rot270_f16(a, b, c) }
3400}
3401#[doc = "Floating-point complex multiply accumulate"]
3402#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
3403#[inline]
3404#[target_feature(enable = "neon,fcma")]
3405#[target_feature(enable = "neon,fp16")]
3406#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3407#[cfg_attr(test, assert_instr(fcmla))]
3408pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
3409    unsafe extern "unadjusted" {
3410        #[cfg_attr(
3411            any(target_arch = "aarch64", target_arch = "arm64ec"),
3412            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
3413        )]
3414        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
3415    }
3416    unsafe { _vcmlaq_rot270_f16(a, b, c) }
3417}
3418#[doc = "Floating-point complex multiply accumulate"]
3419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
3420#[inline]
3421#[target_feature(enable = "neon,fcma")]
3422#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3423#[cfg_attr(test, assert_instr(fcmla))]
3424pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
3425    unsafe extern "unadjusted" {
3426        #[cfg_attr(
3427            any(target_arch = "aarch64", target_arch = "arm64ec"),
3428            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
3429        )]
3430        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
3431    }
3432    unsafe { _vcmla_rot270_f32(a, b, c) }
3433}
3434#[doc = "Floating-point complex multiply accumulate"]
3435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
3436#[inline]
3437#[target_feature(enable = "neon,fcma")]
3438#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3439#[cfg_attr(test, assert_instr(fcmla))]
3440pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
3441    unsafe extern "unadjusted" {
3442        #[cfg_attr(
3443            any(target_arch = "aarch64", target_arch = "arm64ec"),
3444            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
3445        )]
3446        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
3447    }
3448    unsafe { _vcmlaq_rot270_f32(a, b, c) }
3449}
3450#[doc = "Floating-point complex multiply accumulate"]
3451#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
3452#[inline]
3453#[target_feature(enable = "neon,fcma")]
3454#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3455#[cfg_attr(test, assert_instr(fcmla))]
3456pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
3457    unsafe extern "unadjusted" {
3458        #[cfg_attr(
3459            any(target_arch = "aarch64", target_arch = "arm64ec"),
3460            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
3461        )]
3462        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
3463    }
3464    unsafe { _vcmlaq_rot270_f64(a, b, c) }
3465}
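// --- Editorial note; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. The rot270 forms
// contribute out.re += b.im * c.im and out.im -= b.im * c.re, so chaining
// rot0 and rot270 accumulates conj(b) * c, the usual way to get a conjugate
// complex multiply. A minimal check (needs FEAT_FCMA):
#[cfg(test)]
#[target_feature(enable = "neon,fcma")]
#[allow(dead_code)]
fn vcmla_rot270_f32_usage_sketch() {
    let a = vdup_n_f32(0.0);
    let b = vset_lane_f32::<1>(3.0, vdup_n_f32(2.0)); // 2 + 3i
    let c = vset_lane_f32::<1>(7.0, vdup_n_f32(5.0)); // 5 + 7i
    let r = vcmla_rot270_f32(vcmla_f32(a, b, c), b, c);
    // conj(2 + 3i) * (5 + 7i) = 31 - 1i
    assert_eq!(vget_lane_f32::<0>(r), 31.0);
    assert_eq!(vget_lane_f32::<1>(r), -1.0);
}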
3466#[doc = "Floating-point complex multiply accumulate"]
3467#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
3468#[inline]
3469#[target_feature(enable = "neon,fcma")]
3470#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3471#[rustc_legacy_const_generics(3)]
3472#[target_feature(enable = "neon,fp16")]
3473#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3474pub fn vcmla_rot270_lane_f16<const LANE: i32>(
3475    a: float16x4_t,
3476    b: float16x4_t,
3477    c: float16x4_t,
3478) -> float16x4_t {
3479    static_assert_uimm_bits!(LANE, 1);
3480    unsafe {
3481        let c: float16x4_t = simd_shuffle!(
3482            c,
3483            c,
3484            [
3485                2 * LANE as u32,
3486                2 * LANE as u32 + 1,
3487                2 * LANE as u32,
3488                2 * LANE as u32 + 1
3489            ]
3490        );
3491        vcmla_rot270_f16(a, b, c)
3492    }
3493}
3494#[doc = "Floating-point complex multiply accumulate"]
3495#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
3496#[inline]
3497#[target_feature(enable = "neon,fcma")]
3498#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3499#[rustc_legacy_const_generics(3)]
3500#[target_feature(enable = "neon,fp16")]
3501#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3502pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
3503    a: float16x8_t,
3504    b: float16x8_t,
3505    c: float16x4_t,
3506) -> float16x8_t {
3507    static_assert_uimm_bits!(LANE, 1);
3508    unsafe {
3509        let c: float16x8_t = simd_shuffle!(
3510            c,
3511            c,
3512            [
3513                2 * LANE as u32,
3514                2 * LANE as u32 + 1,
3515                2 * LANE as u32,
3516                2 * LANE as u32 + 1,
3517                2 * LANE as u32,
3518                2 * LANE as u32 + 1,
3519                2 * LANE as u32,
3520                2 * LANE as u32 + 1
3521            ]
3522        );
3523        vcmlaq_rot270_f16(a, b, c)
3524    }
3525}
3526#[doc = "Floating-point complex multiply accumulate"]
3527#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
3528#[inline]
3529#[target_feature(enable = "neon,fcma")]
3530#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3531#[rustc_legacy_const_generics(3)]
3532#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3533pub fn vcmla_rot270_lane_f32<const LANE: i32>(
3534    a: float32x2_t,
3535    b: float32x2_t,
3536    c: float32x2_t,
3537) -> float32x2_t {
3538    static_assert!(LANE == 0);
3539    unsafe {
3540        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3541        vcmla_rot270_f32(a, b, c)
3542    }
3543}
3544#[doc = "Floating-point complex multiply accumulate"]
3545#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
3546#[inline]
3547#[target_feature(enable = "neon,fcma")]
3548#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3549#[rustc_legacy_const_generics(3)]
3550#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3551pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
3552    a: float32x4_t,
3553    b: float32x4_t,
3554    c: float32x2_t,
3555) -> float32x4_t {
3556    static_assert!(LANE == 0);
3557    unsafe {
3558        let c: float32x4_t = simd_shuffle!(
3559            c,
3560            c,
3561            [
3562                2 * LANE as u32,
3563                2 * LANE as u32 + 1,
3564                2 * LANE as u32,
3565                2 * LANE as u32 + 1
3566            ]
3567        );
3568        vcmlaq_rot270_f32(a, b, c)
3569    }
3570}
3571#[doc = "Floating-point complex multiply accumulate"]
3572#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
3573#[inline]
3574#[target_feature(enable = "neon,fcma")]
3575#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3576#[rustc_legacy_const_generics(3)]
3577#[target_feature(enable = "neon,fp16")]
3578#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3579pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
3580    a: float16x4_t,
3581    b: float16x4_t,
3582    c: float16x8_t,
3583) -> float16x4_t {
3584    static_assert_uimm_bits!(LANE, 2);
3585    unsafe {
3586        let c: float16x4_t = simd_shuffle!(
3587            c,
3588            c,
3589            [
3590                2 * LANE as u32,
3591                2 * LANE as u32 + 1,
3592                2 * LANE as u32,
3593                2 * LANE as u32 + 1
3594            ]
3595        );
3596        vcmla_rot270_f16(a, b, c)
3597    }
3598}
3599#[doc = "Floating-point complex multiply accumulate"]
3600#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
3601#[inline]
3602#[target_feature(enable = "neon,fcma")]
3603#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3604#[rustc_legacy_const_generics(3)]
3605#[target_feature(enable = "neon,fp16")]
3606#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3607pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
3608    a: float16x8_t,
3609    b: float16x8_t,
3610    c: float16x8_t,
3611) -> float16x8_t {
3612    static_assert_uimm_bits!(LANE, 2);
3613    unsafe {
3614        let c: float16x8_t = simd_shuffle!(
3615            c,
3616            c,
3617            [
3618                2 * LANE as u32,
3619                2 * LANE as u32 + 1,
3620                2 * LANE as u32,
3621                2 * LANE as u32 + 1,
3622                2 * LANE as u32,
3623                2 * LANE as u32 + 1,
3624                2 * LANE as u32,
3625                2 * LANE as u32 + 1
3626            ]
3627        );
3628        vcmlaq_rot270_f16(a, b, c)
3629    }
3630}
3631#[doc = "Floating-point complex multiply accumulate"]
3632#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
3633#[inline]
3634#[target_feature(enable = "neon,fcma")]
3635#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3636#[rustc_legacy_const_generics(3)]
3637#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3638pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
3639    a: float32x2_t,
3640    b: float32x2_t,
3641    c: float32x4_t,
3642) -> float32x2_t {
3643    static_assert_uimm_bits!(LANE, 1);
3644    unsafe {
3645        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
3646        vcmla_rot270_f32(a, b, c)
3647    }
3648}
3649#[doc = "Floating-point complex multiply accumulate"]
3650#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
3651#[inline]
3652#[target_feature(enable = "neon,fcma")]
3653#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
3654#[rustc_legacy_const_generics(3)]
3655#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3656pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
3657    a: float32x4_t,
3658    b: float32x4_t,
3659    c: float32x4_t,
3660) -> float32x4_t {
3661    static_assert_uimm_bits!(LANE, 1);
3662    unsafe {
3663        let c: float32x4_t = simd_shuffle!(
3664            c,
3665            c,
3666            [
3667                2 * LANE as u32,
3668                2 * LANE as u32 + 1,
3669                2 * LANE as u32,
3670                2 * LANE as u32 + 1
3671            ]
3672        );
3673        vcmlaq_rot270_f32(a, b, c)
3674    }
3675}
3676#[doc = "Floating-point complex multiply accumulate"]
3677#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
3678#[inline]
3679#[target_feature(enable = "neon,fcma")]
3680#[target_feature(enable = "neon,fp16")]
3681#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3682#[cfg_attr(test, assert_instr(fcmla))]
3683pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
3684    unsafe extern "unadjusted" {
3685        #[cfg_attr(
3686            any(target_arch = "aarch64", target_arch = "arm64ec"),
3687            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
3688        )]
3689        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
3690    }
3691    unsafe { _vcmla_rot90_f16(a, b, c) }
3692}
3693#[doc = "Floating-point complex multiply accumulate"]
3694#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
3695#[inline]
3696#[target_feature(enable = "neon,fcma")]
3697#[target_feature(enable = "neon,fp16")]
3698#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
3699#[cfg_attr(test, assert_instr(fcmla))]
3700pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
3701    unsafe extern "unadjusted" {
3702        #[cfg_attr(
3703            any(target_arch = "aarch64", target_arch = "arm64ec"),
3704            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
3705        )]
3706        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
3707    }
3708    unsafe { _vcmlaq_rot90_f16(a, b, c) }
3709}
3710#[doc = "Floating-point complex multiply accumulate"]
3711#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
3712#[inline]
3713#[target_feature(enable = "neon,fcma")]
3714#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3715#[cfg_attr(test, assert_instr(fcmla))]
3716pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
3717    unsafe extern "unadjusted" {
3718        #[cfg_attr(
3719            any(target_arch = "aarch64", target_arch = "arm64ec"),
3720            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
3721        )]
3722        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
3723    }
3724    unsafe { _vcmla_rot90_f32(a, b, c) }
3725}
3726#[doc = "Floating-point complex multiply accumulate"]
3727#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
3728#[inline]
3729#[target_feature(enable = "neon,fcma")]
3730#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3731#[cfg_attr(test, assert_instr(fcmla))]
3732pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
3733    unsafe extern "unadjusted" {
3734        #[cfg_attr(
3735            any(target_arch = "aarch64", target_arch = "arm64ec"),
3736            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
3737        )]
3738        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
3739    }
3740    unsafe { _vcmlaq_rot90_f32(a, b, c) }
3741}
3742#[doc = "Floating-point complex multiply accumulate"]
3743#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
3744#[inline]
3745#[target_feature(enable = "neon,fcma")]
3746#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
3747#[cfg_attr(test, assert_instr(fcmla))]
3748pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
3749    unsafe extern "unadjusted" {
3750        #[cfg_attr(
3751            any(target_arch = "aarch64", target_arch = "arm64ec"),
3752            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
3753        )]
3754        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
3755    }
3756    unsafe { _vcmlaq_rot90_f64(a, b, c) }
3757}
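// --- Editorial note; not emitted by stdarch-gen-arm, and the
// `*_usage_sketch` item name is illustrative only. The rot90 forms
// contribute out.re -= b.im * c.im and out.im += b.im * c.re, so chaining
// rot0 and rot90 accumulates the full complex product b * c. A minimal
// check (needs FEAT_FCMA):
#[cfg(test)]
#[target_feature(enable = "neon,fcma")]
#[allow(dead_code)]
fn vcmla_rot90_f32_usage_sketch() {
    let a = vdup_n_f32(0.0);
    let b = vset_lane_f32::<1>(3.0, vdup_n_f32(2.0)); // 2 + 3i
    let c = vset_lane_f32::<1>(7.0, vdup_n_f32(5.0)); // 5 + 7i
    let r = vcmla_rot90_f32(vcmla_f32(a, b, c), b, c);
    // (2 + 3i) * (5 + 7i) = -11 + 29i
    assert_eq!(vget_lane_f32::<0>(r), -11.0);
    assert_eq!(vget_lane_f32::<1>(r), 29.0);
}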
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
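// --- Editorial note (not produced by stdarch-gen-arm): in the _lane/_laneq
// rot90 variants above, LANE selects a complex *pair*, not a scalar element.
// The `simd_shuffle!` broadcasts the pair [2 * LANE, 2 * LANE + 1] across every
// position of `c` before delegating to the plain rot90 intrinsic; that is why
// float16x4_t (two pairs) allows LANE in 0..=1 while float32x2_t (one pair)
// pins LANE to 0 via `static_assert!(LANE == 0)`.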
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
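// --- Editorial sketch (not produced by stdarch-gen-arm): every vcopy_lane_*
// above returns `a` with lane LANE1 replaced by lane LANE2 of `b`, all other
// lanes passing through unchanged. A minimal, test-only illustration:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vcopy_lane_sketch() -> i16 {
    let a: int16x4_t = vdup_n_s16(0); // [0, 0, 0, 0]
    let b: int16x4_t = vdup_n_s16(7); // [7, 7, 7, 7]
    // Copy lane 3 of `b` into lane 1 of `a`; the result is [0, 7, 0, 0].
    let r = vcopy_lane_s16::<1, 3>(a, b);
    vget_lane_s16::<1>(r) // 7
}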
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
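// --- Editorial note (not produced by stdarch-gen-arm): `simd_shuffle!` needs
// both inputs to have the same element count, so the vcopy_laneq_* variants
// first widen the 64-bit `a` by shuffling it with itself (duplicating its
// lanes into a 128-bit vector). The mixing shuffle can then address `a` at
// indices 0..N and `b` at `N + LANE2`, while emitting only the narrow result.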
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert!(LANE2 == 0);
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
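// --- Editorial note (not produced by stdarch-gen-arm): the vcopyq_lane_*
// variants apply the same widening trick in the opposite direction, padding
// the 64-bit `b` up to 128 bits before mixing. For one-element inputs such as
// float64x1_t, int64x1_t, uint64x1_t, and poly64x1_t there is only lane 0 to
// read, so `static_assert!(LANE2 == 0)` replaces the usual bit-width check.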
4434#[doc = "Insert vector element from another vector element"]
4435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
4436#[inline]
4437#[target_feature(enable = "neon")]
4438#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
4439#[rustc_legacy_const_generics(1, 3)]
4440#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4441pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
4442    a: int64x2_t,
4443    b: int64x1_t,
4444) -> int64x2_t {
4445    static_assert_uimm_bits!(LANE1, 1);
4446    static_assert!(LANE2 == 0);
4447    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
4448    unsafe {
4449        match LANE1 & 0b1 {
4450            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
4451            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
4452            _ => unreachable_unchecked(),
4453        }
4454    }
4455}
4456#[doc = "Insert vector element from another vector element"]
4457#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
4458#[inline]
4459#[target_feature(enable = "neon")]
4460#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
4461#[rustc_legacy_const_generics(1, 3)]
4462#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4463pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
4464    a: uint64x2_t,
4465    b: uint64x1_t,
4466) -> uint64x2_t {
4467    static_assert_uimm_bits!(LANE1, 1);
4468    static_assert!(LANE2 == 0);
4469    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
4470    unsafe {
4471        match LANE1 & 0b1 {
4472            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
4473            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
4474            _ => unreachable_unchecked(),
4475        }
4476    }
4477}
4478#[doc = "Insert vector element from another vector element"]
4479#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
4480#[inline]
4481#[target_feature(enable = "neon")]
4482#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
4483#[rustc_legacy_const_generics(1, 3)]
4484#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4485pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
4486    a: poly64x2_t,
4487    b: poly64x1_t,
4488) -> poly64x2_t {
4489    static_assert_uimm_bits!(LANE1, 1);
4490    static_assert!(LANE2 == 0);
4491    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
4492    unsafe {
4493        match LANE1 & 0b1 {
4494            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
4495            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
4496            _ => unreachable_unchecked(),
4497        }
4498    }
4499}
4500#[doc = "Insert vector element from another vector element"]
4501#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
4502#[inline]
4503#[target_feature(enable = "neon")]
4504#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
4505#[rustc_legacy_const_generics(1, 3)]
4506#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4507pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
4508    static_assert_uimm_bits!(LANE1, 4);
4509    static_assert_uimm_bits!(LANE2, 3);
4510    let b: int8x16_t =
4511        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
4512    unsafe {
4513        match LANE1 & 0b1111 {
4514            0 => simd_shuffle!(
4515                a,
4516                b,
4517                [
4518                    16 + LANE2 as u32,
4519                    1,
4520                    2,
4521                    3,
4522                    4,
4523                    5,
4524                    6,
4525                    7,
4526                    8,
4527                    9,
4528                    10,
4529                    11,
4530                    12,
4531                    13,
4532                    14,
4533                    15
4534                ]
4535            ),
4536            1 => simd_shuffle!(
4537                a,
4538                b,
4539                [
4540                    0,
4541                    16 + LANE2 as u32,
4542                    2,
4543                    3,
4544                    4,
4545                    5,
4546                    6,
4547                    7,
4548                    8,
4549                    9,
4550                    10,
4551                    11,
4552                    12,
4553                    13,
4554                    14,
4555                    15
4556                ]
4557            ),
4558            2 => simd_shuffle!(
4559                a,
4560                b,
4561                [
4562                    0,
4563                    1,
4564                    16 + LANE2 as u32,
4565                    3,
4566                    4,
4567                    5,
4568                    6,
4569                    7,
4570                    8,
4571                    9,
4572                    10,
4573                    11,
4574                    12,
4575                    13,
4576                    14,
4577                    15
4578                ]
4579            ),
4580            3 => simd_shuffle!(
4581                a,
4582                b,
4583                [
4584                    0,
4585                    1,
4586                    2,
4587                    16 + LANE2 as u32,
4588                    4,
4589                    5,
4590                    6,
4591                    7,
4592                    8,
4593                    9,
4594                    10,
4595                    11,
4596                    12,
4597                    13,
4598                    14,
4599                    15
4600                ]
4601            ),
4602            4 => simd_shuffle!(
4603                a,
4604                b,
4605                [
4606                    0,
4607                    1,
4608                    2,
4609                    3,
4610                    16 + LANE2 as u32,
4611                    5,
4612                    6,
4613                    7,
4614                    8,
4615                    9,
4616                    10,
4617                    11,
4618                    12,
4619                    13,
4620                    14,
4621                    15
4622                ]
4623            ),
4624            5 => simd_shuffle!(
4625                a,
4626                b,
4627                [
4628                    0,
4629                    1,
4630                    2,
4631                    3,
4632                    4,
4633                    16 + LANE2 as u32,
4634                    6,
4635                    7,
4636                    8,
4637                    9,
4638                    10,
4639                    11,
4640                    12,
4641                    13,
4642                    14,
4643                    15
4644                ]
4645            ),
4646            6 => simd_shuffle!(
4647                a,
4648                b,
4649                [
4650                    0,
4651                    1,
4652                    2,
4653                    3,
4654                    4,
4655                    5,
4656                    16 + LANE2 as u32,
4657                    7,
4658                    8,
4659                    9,
4660                    10,
4661                    11,
4662                    12,
4663                    13,
4664                    14,
4665                    15
4666                ]
4667            ),
4668            7 => simd_shuffle!(
4669                a,
4670                b,
4671                [
4672                    0,
4673                    1,
4674                    2,
4675                    3,
4676                    4,
4677                    5,
4678                    6,
4679                    16 + LANE2 as u32,
4680                    8,
4681                    9,
4682                    10,
4683                    11,
4684                    12,
4685                    13,
4686                    14,
4687                    15
4688                ]
4689            ),
4690            8 => simd_shuffle!(
4691                a,
4692                b,
4693                [
4694                    0,
4695                    1,
4696                    2,
4697                    3,
4698                    4,
4699                    5,
4700                    6,
4701                    7,
4702                    16 + LANE2 as u32,
4703                    9,
4704                    10,
4705                    11,
4706                    12,
4707                    13,
4708                    14,
4709                    15
4710                ]
4711            ),
4712            9 => simd_shuffle!(
4713                a,
4714                b,
4715                [
4716                    0,
4717                    1,
4718                    2,
4719                    3,
4720                    4,
4721                    5,
4722                    6,
4723                    7,
4724                    8,
4725                    16 + LANE2 as u32,
4726                    10,
4727                    11,
4728                    12,
4729                    13,
4730                    14,
4731                    15
4732                ]
4733            ),
4734            10 => simd_shuffle!(
4735                a,
4736                b,
4737                [
4738                    0,
4739                    1,
4740                    2,
4741                    3,
4742                    4,
4743                    5,
4744                    6,
4745                    7,
4746                    8,
4747                    9,
4748                    16 + LANE2 as u32,
4749                    11,
4750                    12,
4751                    13,
4752                    14,
4753                    15
4754                ]
4755            ),
4756            11 => simd_shuffle!(
4757                a,
4758                b,
4759                [
4760                    0,
4761                    1,
4762                    2,
4763                    3,
4764                    4,
4765                    5,
4766                    6,
4767                    7,
4768                    8,
4769                    9,
4770                    10,
4771                    16 + LANE2 as u32,
4772                    12,
4773                    13,
4774                    14,
4775                    15
4776                ]
4777            ),
4778            12 => simd_shuffle!(
4779                a,
4780                b,
4781                [
4782                    0,
4783                    1,
4784                    2,
4785                    3,
4786                    4,
4787                    5,
4788                    6,
4789                    7,
4790                    8,
4791                    9,
4792                    10,
4793                    11,
4794                    16 + LANE2 as u32,
4795                    13,
4796                    14,
4797                    15
4798                ]
4799            ),
4800            13 => simd_shuffle!(
4801                a,
4802                b,
4803                [
4804                    0,
4805                    1,
4806                    2,
4807                    3,
4808                    4,
4809                    5,
4810                    6,
4811                    7,
4812                    8,
4813                    9,
4814                    10,
4815                    11,
4816                    12,
4817                    16 + LANE2 as u32,
4818                    14,
4819                    15
4820                ]
4821            ),
4822            14 => simd_shuffle!(
4823                a,
4824                b,
4825                [
4826                    0,
4827                    1,
4828                    2,
4829                    3,
4830                    4,
4831                    5,
4832                    6,
4833                    7,
4834                    8,
4835                    9,
4836                    10,
4837                    11,
4838                    12,
4839                    13,
4840                    16 + LANE2 as u32,
4841                    15
4842                ]
4843            ),
4844            15 => simd_shuffle!(
4845                a,
4846                b,
4847                [
4848                    0,
4849                    1,
4850                    2,
4851                    3,
4852                    4,
4853                    5,
4854                    6,
4855                    7,
4856                    8,
4857                    9,
4858                    10,
4859                    11,
4860                    12,
4861                    13,
4862                    14,
4863                    16 + LANE2 as u32
4864                ]
4865            ),
4866            _ => unreachable_unchecked(),
4867        }
4868    }
4869}
4870#[doc = "Insert vector element from another vector element"]
4871#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
4872#[inline]
4873#[target_feature(enable = "neon")]
4874#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
4875#[rustc_legacy_const_generics(1, 3)]
4876#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4877pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
4878    a: int16x8_t,
4879    b: int16x4_t,
4880) -> int16x8_t {
4881    static_assert_uimm_bits!(LANE1, 3);
4882    static_assert_uimm_bits!(LANE2, 2);
4883    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
4884    unsafe {
4885        match LANE1 & 0b111 {
4886            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
4887            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
4888            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
4889            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
4890            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
4891            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
4892            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
4893            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
4894            _ => unreachable_unchecked(),
4895        }
4896    }
4897}
4898#[doc = "Insert vector element from another vector element"]
4899#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
4900#[inline]
4901#[target_feature(enable = "neon")]
4902#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
4903#[rustc_legacy_const_generics(1, 3)]
4904#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4905pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
4906    a: int32x4_t,
4907    b: int32x2_t,
4908) -> int32x4_t {
4909    static_assert_uimm_bits!(LANE1, 2);
4910    static_assert_uimm_bits!(LANE2, 1);
4911    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
4912    unsafe {
4913        match LANE1 & 0b11 {
4914            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
4915            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
4916            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
4917            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
4918            _ => unreachable_unchecked(),
4919        }
4920    }
4921}
4922#[doc = "Insert vector element from another vector element"]
4923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
4924#[inline]
4925#[target_feature(enable = "neon")]
4926#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
4927#[rustc_legacy_const_generics(1, 3)]
4928#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4929pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
4930    a: uint8x16_t,
4931    b: uint8x8_t,
4932) -> uint8x16_t {
4933    static_assert_uimm_bits!(LANE1, 4);
4934    static_assert_uimm_bits!(LANE2, 3);
4935    let b: uint8x16_t =
4936        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
4937    unsafe {
4938        match LANE1 & 0b1111 {
4939            0 => simd_shuffle!(
4940                a,
4941                b,
4942                [
4943                    16 + LANE2 as u32,
4944                    1,
4945                    2,
4946                    3,
4947                    4,
4948                    5,
4949                    6,
4950                    7,
4951                    8,
4952                    9,
4953                    10,
4954                    11,
4955                    12,
4956                    13,
4957                    14,
4958                    15
4959                ]
4960            ),
4961            1 => simd_shuffle!(
4962                a,
4963                b,
4964                [
4965                    0,
4966                    16 + LANE2 as u32,
4967                    2,
4968                    3,
4969                    4,
4970                    5,
4971                    6,
4972                    7,
4973                    8,
4974                    9,
4975                    10,
4976                    11,
4977                    12,
4978                    13,
4979                    14,
4980                    15
4981                ]
4982            ),
4983            2 => simd_shuffle!(
4984                a,
4985                b,
4986                [
4987                    0,
4988                    1,
4989                    16 + LANE2 as u32,
4990                    3,
4991                    4,
4992                    5,
4993                    6,
4994                    7,
4995                    8,
4996                    9,
4997                    10,
4998                    11,
4999                    12,
5000                    13,
5001                    14,
5002                    15
5003                ]
5004            ),
5005            3 => simd_shuffle!(
5006                a,
5007                b,
5008                [
5009                    0,
5010                    1,
5011                    2,
5012                    16 + LANE2 as u32,
5013                    4,
5014                    5,
5015                    6,
5016                    7,
5017                    8,
5018                    9,
5019                    10,
5020                    11,
5021                    12,
5022                    13,
5023                    14,
5024                    15
5025                ]
5026            ),
5027            4 => simd_shuffle!(
5028                a,
5029                b,
5030                [
5031                    0,
5032                    1,
5033                    2,
5034                    3,
5035                    16 + LANE2 as u32,
5036                    5,
5037                    6,
5038                    7,
5039                    8,
5040                    9,
5041                    10,
5042                    11,
5043                    12,
5044                    13,
5045                    14,
5046                    15
5047                ]
5048            ),
5049            5 => simd_shuffle!(
5050                a,
5051                b,
5052                [
5053                    0,
5054                    1,
5055                    2,
5056                    3,
5057                    4,
5058                    16 + LANE2 as u32,
5059                    6,
5060                    7,
5061                    8,
5062                    9,
5063                    10,
5064                    11,
5065                    12,
5066                    13,
5067                    14,
5068                    15
5069                ]
5070            ),
5071            6 => simd_shuffle!(
5072                a,
5073                b,
5074                [
5075                    0,
5076                    1,
5077                    2,
5078                    3,
5079                    4,
5080                    5,
5081                    16 + LANE2 as u32,
5082                    7,
5083                    8,
5084                    9,
5085                    10,
5086                    11,
5087                    12,
5088                    13,
5089                    14,
5090                    15
5091                ]
5092            ),
5093            7 => simd_shuffle!(
5094                a,
5095                b,
5096                [
5097                    0,
5098                    1,
5099                    2,
5100                    3,
5101                    4,
5102                    5,
5103                    6,
5104                    16 + LANE2 as u32,
5105                    8,
5106                    9,
5107                    10,
5108                    11,
5109                    12,
5110                    13,
5111                    14,
5112                    15
5113                ]
5114            ),
5115            8 => simd_shuffle!(
5116                a,
5117                b,
5118                [
5119                    0,
5120                    1,
5121                    2,
5122                    3,
5123                    4,
5124                    5,
5125                    6,
5126                    7,
5127                    16 + LANE2 as u32,
5128                    9,
5129                    10,
5130                    11,
5131                    12,
5132                    13,
5133                    14,
5134                    15
5135                ]
5136            ),
5137            9 => simd_shuffle!(
5138                a,
5139                b,
5140                [
5141                    0,
5142                    1,
5143                    2,
5144                    3,
5145                    4,
5146                    5,
5147                    6,
5148                    7,
5149                    8,
5150                    16 + LANE2 as u32,
5151                    10,
5152                    11,
5153                    12,
5154                    13,
5155                    14,
5156                    15
5157                ]
5158            ),
5159            10 => simd_shuffle!(
5160                a,
5161                b,
5162                [
5163                    0,
5164                    1,
5165                    2,
5166                    3,
5167                    4,
5168                    5,
5169                    6,
5170                    7,
5171                    8,
5172                    9,
5173                    16 + LANE2 as u32,
5174                    11,
5175                    12,
5176                    13,
5177                    14,
5178                    15
5179                ]
5180            ),
5181            11 => simd_shuffle!(
5182                a,
5183                b,
5184                [
5185                    0,
5186                    1,
5187                    2,
5188                    3,
5189                    4,
5190                    5,
5191                    6,
5192                    7,
5193                    8,
5194                    9,
5195                    10,
5196                    16 + LANE2 as u32,
5197                    12,
5198                    13,
5199                    14,
5200                    15
5201                ]
5202            ),
5203            12 => simd_shuffle!(
5204                a,
5205                b,
5206                [
5207                    0,
5208                    1,
5209                    2,
5210                    3,
5211                    4,
5212                    5,
5213                    6,
5214                    7,
5215                    8,
5216                    9,
5217                    10,
5218                    11,
5219                    16 + LANE2 as u32,
5220                    13,
5221                    14,
5222                    15
5223                ]
5224            ),
5225            13 => simd_shuffle!(
5226                a,
5227                b,
5228                [
5229                    0,
5230                    1,
5231                    2,
5232                    3,
5233                    4,
5234                    5,
5235                    6,
5236                    7,
5237                    8,
5238                    9,
5239                    10,
5240                    11,
5241                    12,
5242                    16 + LANE2 as u32,
5243                    14,
5244                    15
5245                ]
5246            ),
5247            14 => simd_shuffle!(
5248                a,
5249                b,
5250                [
5251                    0,
5252                    1,
5253                    2,
5254                    3,
5255                    4,
5256                    5,
5257                    6,
5258                    7,
5259                    8,
5260                    9,
5261                    10,
5262                    11,
5263                    12,
5264                    13,
5265                    16 + LANE2 as u32,
5266                    15
5267                ]
5268            ),
5269            15 => simd_shuffle!(
5270                a,
5271                b,
5272                [
5273                    0,
5274                    1,
5275                    2,
5276                    3,
5277                    4,
5278                    5,
5279                    6,
5280                    7,
5281                    8,
5282                    9,
5283                    10,
5284                    11,
5285                    12,
5286                    13,
5287                    14,
5288                    16 + LANE2 as u32
5289                ]
5290            ),
5291            _ => unreachable_unchecked(),
5292        }
5293    }
5294}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x4_t,
) -> uint16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
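// Usage sketch for the half-width `_lane_` form (values are illustrative):
//
//     let a = vdupq_n_u16(0);                 // 8 lanes of 0
//     let b = vdup_n_u16(9);                  // 4 lanes of 9
//     let r = vcopyq_lane_u16::<3, 0>(a, b);  // r = [0, 0, 0, 9, 0, 0, 0, 0]
//
// Because of `rustc_legacy_const_generics`, the C-style call
// `vcopyq_lane_u16(a, 3, b, 0)` is accepted as well.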
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x2_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x8_t,
) -> poly8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 3);
    let b: poly8x16_t =
        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
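// Note on the widening step used by every half-width `_lane_` variant: because
// `simd_shuffle!` needs both inputs to have the same lane count, the 64-bit `b`
// is first concatenated with itself, so lanes 8-15 of the widened vector
// duplicate lanes 0-7. Only lane `LANE2` (0-7) is ever selected afterwards, so
// the duplication is harmless.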
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x4_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
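// Usage sketch for the full-width `_laneq_` form (values are illustrative):
//
//     let a = vdupq_n_f64(0.0);
//     let b = vdupq_n_f64(5.0);
//     let r = vcopyq_laneq_f64::<1, 0>(a, b);  // r = [0.0, 5.0]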
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
    a: int8x16_t,
    b: int8x16_t,
) -> int8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x8_t,
) -> int16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x4_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x2_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x16_t,
    b: uint8x16_t,
) -> uint8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x8_t,
) -> uint16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x16_t,
    b: poly8x16_t,
) -> poly8x16_t {
    static_assert_uimm_bits!(LANE1, 4);
    static_assert_uimm_bits!(LANE2, 4);
    unsafe {
        match LANE1 & 0b1111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
            8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
            9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
            10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
            11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
            12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
            13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
            14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
            15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 1))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Create a new vector from a 64-bit bit pattern"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcreate_f64(a: u64) -> float64x1_t {
    unsafe { transmute(a) }
}
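// `vcreate_f64` is a pure bit reinterpretation (hence `assert_instr(nop)`), so
// feeding it the IEEE-754 encoding of 1.0 yields the vector [1.0]
// (illustrative):
//
//     let r = vcreate_f64(0x3FF0_0000_0000_0000);  // r = [1.0]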
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    unsafe { simd_cast(a) }
}
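// Conversion sketch (values are illustrative): SCVTF turns each signed 64-bit
// integer lane into the corresponding double.
//
//     let r = vcvt_f64_s64(vdup_n_s64(-3));  // r = [-3.0]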
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    unsafe { simd_cast(a) }
}
7140#[doc = "Fixed-point convert to floating-point"]
7141#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
7142#[inline]
7143#[target_feature(enable = "neon")]
7144#[cfg_attr(test, assert_instr(ucvtf))]
7145#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7146pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
7147    unsafe { simd_cast(a) }
7148}
7149#[doc = "Fixed-point convert to floating-point"]
7150#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
7151#[inline]
7152#[target_feature(enable = "neon")]
7153#[cfg_attr(test, assert_instr(ucvtf))]
7154#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7155pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
7156    unsafe { simd_cast(a) }
7157}
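// Editorial usage sketch (not part of the generated output): lane-wise
// signed and unsigned integer -> f64 conversions. `vdup_n_s64`,
// `vdupq_n_u64`, and the lane getters are assumed from elsewhere in this
// crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_f64_from_ints() {
    let s: float64x1_t = vcvt_f64_s64(vdup_n_s64(-7));
    assert_eq!(vget_lane_f64::<0>(s), -7.0);
    let u: float64x2_t = vcvtq_f64_u64(vdupq_n_u64(42));
    assert_eq!(vgetq_lane_f64::<1>(u), 42.0);
}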
7158#[doc = "Floating-point convert to lower precision"]
7159#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
7160#[inline]
7161#[cfg_attr(test, assert_instr(fcvtn2))]
7162#[target_feature(enable = "neon,fp16")]
7163#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7164pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
7165    vcombine_f16(a, vcvt_f16_f32(b))
7166}
7167#[doc = "Floating-point convert to higher precision"]
7168#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
7169#[inline]
7170#[cfg_attr(test, assert_instr(fcvtl2))]
7171#[target_feature(enable = "neon,fp16")]
7172#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7173pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
7174    vcvt_f32_f16(vget_high_f16(a))
7175}
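// Editorial usage sketch (not part of the generated output):
// `vcvt_high_f16_f32` narrows `b` into the upper half of the result, and
// `vcvt_high_f32_f16` widens that upper half again. `vcvt_f16_f32`,
// `vdupq_n_f32`, and `vgetq_lane_f32` are assumed from elsewhere in this
// crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16")]
fn example_vcvt_high_f16() {
    let lo: float16x4_t = vcvt_f16_f32(vdupq_n_f32(1.0));
    let r: float16x8_t = vcvt_high_f16_f32(lo, vdupq_n_f32(2.0));
    // Widening the upper half recovers the f32 values that were narrowed.
    let hi: float32x4_t = vcvt_high_f32_f16(r);
    assert_eq!(vgetq_lane_f32::<0>(hi), 2.0);
}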
7176#[doc = "Floating-point convert to lower precision narrow"]
7177#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
7178#[inline]
7179#[target_feature(enable = "neon")]
7180#[cfg_attr(test, assert_instr(fcvtn2))]
7181#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7182pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
7183    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
7184}
7185#[doc = "Floating-point convert to higher precision long"]
7186#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
7187#[inline]
7188#[target_feature(enable = "neon")]
7189#[cfg_attr(test, assert_instr(fcvtl2))]
7190#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7191pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
7192    unsafe {
7193        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
7194        simd_cast(b)
7195    }
7196}
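// Editorial usage sketch (not part of the generated output): the "high" f64
// variant widens only the upper two f32 lanes. `vcombine_f32` and
// `vdup_n_f32` are assumed from elsewhere in this crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_high_f64_f32() {
    let a: float32x4_t = vcombine_f32(vdup_n_f32(1.0), vdup_n_f32(2.0));
    // Only lanes 2 and 3 (the 2.0 half) are widened.
    let r: float64x2_t = vcvt_high_f64_f32(a);
    assert_eq!(vgetq_lane_f64::<0>(r), 2.0);
}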
7197#[doc = "Fixed-point convert to floating-point"]
7198#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
7199#[inline]
7200#[target_feature(enable = "neon")]
7201#[cfg_attr(test, assert_instr(scvtf, N = 2))]
7202#[rustc_legacy_const_generics(1)]
7203#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7204pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
7205    static_assert!(N >= 1 && N <= 64);
7206    unsafe extern "unadjusted" {
7207        #[cfg_attr(
7208            any(target_arch = "aarch64", target_arch = "arm64ec"),
7209            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
7210        )]
7211        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
7212    }
7213    unsafe { _vcvt_n_f64_s64(a, N) }
7214}
7215#[doc = "Fixed-point convert to floating-point"]
7216#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
7217#[inline]
7218#[target_feature(enable = "neon")]
7219#[cfg_attr(test, assert_instr(scvtf, N = 2))]
7220#[rustc_legacy_const_generics(1)]
7221#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7222pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
7223    static_assert!(N >= 1 && N <= 64);
7224    unsafe extern "unadjusted" {
7225        #[cfg_attr(
7226            any(target_arch = "aarch64", target_arch = "arm64ec"),
7227            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
7228        )]
7229        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
7230    }
7231    unsafe { _vcvtq_n_f64_s64(a, N) }
7232}
7233#[doc = "Fixed-point convert to floating-point"]
7234#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
7235#[inline]
7236#[target_feature(enable = "neon")]
7237#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
7238#[rustc_legacy_const_generics(1)]
7239#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7240pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
7241    static_assert!(N >= 1 && N <= 64);
7242    unsafe extern "unadjusted" {
7243        #[cfg_attr(
7244            any(target_arch = "aarch64", target_arch = "arm64ec"),
7245            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
7246        )]
7247        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
7248    }
7249    unsafe { _vcvt_n_f64_u64(a, N) }
7250}
7251#[doc = "Fixed-point convert to floating-point"]
7252#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
7253#[inline]
7254#[target_feature(enable = "neon")]
7255#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
7256#[rustc_legacy_const_generics(1)]
7257#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7258pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
7259    static_assert!(N >= 1 && N <= 64);
7260    unsafe extern "unadjusted" {
7261        #[cfg_attr(
7262            any(target_arch = "aarch64", target_arch = "arm64ec"),
7263            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
7264        )]
7265        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
7266    }
7267    unsafe { _vcvtq_n_f64_u64(a, N) }
7268}
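// Editorial usage sketch (not part of the generated output): with `N`
// fractional bits, the input is interpreted as a fixed-point value and
// divided by 2^N, so 3 with N = 1 converts to 1.5.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_n_f64_s64() {
    let r: float64x1_t = vcvt_n_f64_s64::<1>(vdup_n_s64(3));
    assert_eq!(vget_lane_f64::<0>(r), 1.5);
}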
7269#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7270#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
7271#[inline]
7272#[target_feature(enable = "neon")]
7273#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
7274#[rustc_legacy_const_generics(1)]
7275#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7276pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
7277    static_assert!(N >= 1 && N <= 64);
7278    unsafe extern "unadjusted" {
7279        #[cfg_attr(
7280            any(target_arch = "aarch64", target_arch = "arm64ec"),
7281            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
7282        )]
7283        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
7284    }
7285    unsafe { _vcvt_n_s64_f64(a, N) }
7286}
7287#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7288#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
7289#[inline]
7290#[target_feature(enable = "neon")]
7291#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
7292#[rustc_legacy_const_generics(1)]
7293#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7294pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
7295    static_assert!(N >= 1 && N <= 64);
7296    unsafe extern "unadjusted" {
7297        #[cfg_attr(
7298            any(target_arch = "aarch64", target_arch = "arm64ec"),
7299            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
7300        )]
7301        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
7302    }
7303    unsafe { _vcvtq_n_s64_f64(a, N) }
7304}
7305#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7306#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
7307#[inline]
7308#[target_feature(enable = "neon")]
7309#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
7310#[rustc_legacy_const_generics(1)]
7311#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7312pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
7313    static_assert!(N >= 1 && N <= 64);
7314    unsafe extern "unadjusted" {
7315        #[cfg_attr(
7316            any(target_arch = "aarch64", target_arch = "arm64ec"),
7317            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
7318        )]
7319        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
7320    }
7321    unsafe { _vcvt_n_u64_f64(a, N) }
7322}
7323#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7324#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
7325#[inline]
7326#[target_feature(enable = "neon")]
7327#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
7328#[rustc_legacy_const_generics(1)]
7329#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7330pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
7331    static_assert!(N >= 1 && N <= 64);
7332    unsafe extern "unadjusted" {
7333        #[cfg_attr(
7334            any(target_arch = "aarch64", target_arch = "arm64ec"),
7335            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
7336        )]
7337        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
7338    }
7339    unsafe { _vcvtq_n_u64_f64(a, N) }
7340}
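// Editorial usage sketch (not part of the generated output): the float is
// scaled by 2^N and then truncated toward zero, so 1.5 with N = 1 becomes 3.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvt_n_s64_f64() {
    let r: int64x1_t = vcvt_n_s64_f64::<1>(vdup_n_f64(1.5));
    assert_eq!(vget_lane_s64::<0>(r), 3);
}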
7341#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
7342#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
7343#[inline]
7344#[target_feature(enable = "neon")]
7345#[cfg_attr(test, assert_instr(fcvtzs))]
7346#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7347pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
7348    unsafe extern "unadjusted" {
7349        #[cfg_attr(
7350            any(target_arch = "aarch64", target_arch = "arm64ec"),
7351            link_name = "llvm.fptosi.sat.v1i64.v1f64"
7352        )]
7353        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
7354    }
7355    unsafe { _vcvt_s64_f64(a) }
7356}
7357#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
7358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
7359#[inline]
7360#[target_feature(enable = "neon")]
7361#[cfg_attr(test, assert_instr(fcvtzs))]
7362#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7363pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
7364    unsafe extern "unadjusted" {
7365        #[cfg_attr(
7366            any(target_arch = "aarch64", target_arch = "arm64ec"),
7367            link_name = "llvm.fptosi.sat.v2i64.v2f64"
7368        )]
7369        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
7370    }
7371    unsafe { _vcvtq_s64_f64(a) }
7372}
7373#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
7374#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
7375#[inline]
7376#[target_feature(enable = "neon")]
7377#[cfg_attr(test, assert_instr(fcvtzu))]
7378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7379pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
7380    unsafe extern "unadjusted" {
7381        #[cfg_attr(
7382            any(target_arch = "aarch64", target_arch = "arm64ec"),
7383            link_name = "llvm.fptoui.sat.v1i64.v1f64"
7384        )]
7385        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
7386    }
7387    unsafe { _vcvt_u64_f64(a) }
7388}
7389#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
7390#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
7391#[inline]
7392#[target_feature(enable = "neon")]
7393#[cfg_attr(test, assert_instr(fcvtzu))]
7394#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7395pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
7396    unsafe extern "unadjusted" {
7397        #[cfg_attr(
7398            any(target_arch = "aarch64", target_arch = "arm64ec"),
7399            link_name = "llvm.fptoui.sat.v2i64.v2f64"
7400        )]
7401        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
7402    }
7403    unsafe { _vcvtq_u64_f64(a) }
7404}
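// Editorial usage sketch (not part of the generated output): these
// conversions truncate toward zero and, via the LLVM saturating intrinsics,
// clamp out-of-range inputs instead of producing undefined results (matching
// FCVTZS behavior).
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvtq_s64_f64() {
    let t: int64x2_t = vcvtq_s64_f64(vdupq_n_f64(-2.7));
    assert_eq!(vgetq_lane_s64::<0>(t), -2); // truncation, not flooring
    let sat: int64x2_t = vcvtq_s64_f64(vdupq_n_f64(f64::INFINITY));
    assert_eq!(vgetq_lane_s64::<0>(sat), i64::MAX); // saturation
}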
7405#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7406#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
7407#[inline]
7408#[cfg_attr(test, assert_instr(fcvtas))]
7409#[target_feature(enable = "neon,fp16")]
7410#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7411pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
7412    unsafe extern "unadjusted" {
7413        #[cfg_attr(
7414            any(target_arch = "aarch64", target_arch = "arm64ec"),
7415            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
7416        )]
7417        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
7418    }
7419    unsafe { _vcvta_s16_f16(a) }
7420}
7421#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7422#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
7423#[inline]
7424#[cfg_attr(test, assert_instr(fcvtas))]
7425#[target_feature(enable = "neon,fp16")]
7426#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7427pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
7428    unsafe extern "unadjusted" {
7429        #[cfg_attr(
7430            any(target_arch = "aarch64", target_arch = "arm64ec"),
7431            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
7432        )]
7433        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
7434    }
7435    unsafe { _vcvtaq_s16_f16(a) }
7436}
7437#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7438#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
7439#[inline]
7440#[target_feature(enable = "neon")]
7441#[cfg_attr(test, assert_instr(fcvtas))]
7442#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7443pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
7444    unsafe extern "unadjusted" {
7445        #[cfg_attr(
7446            any(target_arch = "aarch64", target_arch = "arm64ec"),
7447            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
7448        )]
7449        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
7450    }
7451    unsafe { _vcvta_s32_f32(a) }
7452}
7453#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7454#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
7455#[inline]
7456#[target_feature(enable = "neon")]
7457#[cfg_attr(test, assert_instr(fcvtas))]
7458#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7459pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
7460    unsafe extern "unadjusted" {
7461        #[cfg_attr(
7462            any(target_arch = "aarch64", target_arch = "arm64ec"),
7463            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
7464        )]
7465        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
7466    }
7467    unsafe { _vcvtaq_s32_f32(a) }
7468}
7469#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7470#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
7471#[inline]
7472#[target_feature(enable = "neon")]
7473#[cfg_attr(test, assert_instr(fcvtas))]
7474#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7475pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
7476    unsafe extern "unadjusted" {
7477        #[cfg_attr(
7478            any(target_arch = "aarch64", target_arch = "arm64ec"),
7479            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
7480        )]
7481        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
7482    }
7483    unsafe { _vcvta_s64_f64(a) }
7484}
7485#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
7486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
7487#[inline]
7488#[target_feature(enable = "neon")]
7489#[cfg_attr(test, assert_instr(fcvtas))]
7490#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7491pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
7492    unsafe extern "unadjusted" {
7493        #[cfg_attr(
7494            any(target_arch = "aarch64", target_arch = "arm64ec"),
7495            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
7496        )]
7497        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
7498    }
7499    unsafe { _vcvtaq_s64_f64(a) }
7500}
7501#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7502#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
7503#[inline]
7504#[cfg_attr(test, assert_instr(fcvtau))]
7505#[target_feature(enable = "neon,fp16")]
7506#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7507pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
7508    unsafe extern "unadjusted" {
7509        #[cfg_attr(
7510            any(target_arch = "aarch64", target_arch = "arm64ec"),
7511            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
7512        )]
7513        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
7514    }
7515    unsafe { _vcvta_u16_f16(a) }
7516}
7517#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7518#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
7519#[inline]
7520#[cfg_attr(test, assert_instr(fcvtau))]
7521#[target_feature(enable = "neon,fp16")]
7522#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7523pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
7524    unsafe extern "unadjusted" {
7525        #[cfg_attr(
7526            any(target_arch = "aarch64", target_arch = "arm64ec"),
7527            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
7528        )]
7529        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
7530    }
7531    unsafe { _vcvtaq_u16_f16(a) }
7532}
7533#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7534#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
7535#[inline]
7536#[target_feature(enable = "neon")]
7537#[cfg_attr(test, assert_instr(fcvtau))]
7538#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7539pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
7540    unsafe extern "unadjusted" {
7541        #[cfg_attr(
7542            any(target_arch = "aarch64", target_arch = "arm64ec"),
7543            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
7544        )]
7545        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
7546    }
7547    unsafe { _vcvta_u32_f32(a) }
7548}
7549#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7550#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
7551#[inline]
7552#[target_feature(enable = "neon")]
7553#[cfg_attr(test, assert_instr(fcvtau))]
7554#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7555pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
7556    unsafe extern "unadjusted" {
7557        #[cfg_attr(
7558            any(target_arch = "aarch64", target_arch = "arm64ec"),
7559            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
7560        )]
7561        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
7562    }
7563    unsafe { _vcvtaq_u32_f32(a) }
7564}
7565#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7566#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
7567#[inline]
7568#[target_feature(enable = "neon")]
7569#[cfg_attr(test, assert_instr(fcvtau))]
7570#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7571pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
7572    unsafe extern "unadjusted" {
7573        #[cfg_attr(
7574            any(target_arch = "aarch64", target_arch = "arm64ec"),
7575            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
7576        )]
7577        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
7578    }
7579    unsafe { _vcvta_u64_f64(a) }
7580}
7581#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
7582#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
7583#[inline]
7584#[target_feature(enable = "neon")]
7585#[cfg_attr(test, assert_instr(fcvtau))]
7586#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7587pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
7588    unsafe extern "unadjusted" {
7589        #[cfg_attr(
7590            any(target_arch = "aarch64", target_arch = "arm64ec"),
7591            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
7592        )]
7593        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
7594    }
7595    unsafe { _vcvtaq_u64_f64(a) }
7596}
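// Editorial usage sketch (not part of the generated output): "ties to away"
// rounds halfway cases away from zero, so 0.5 becomes 1 and -0.5 becomes -1
// (round-to-even would give 0 for both).
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvta() {
    assert_eq!(vgetq_lane_s32::<0>(vcvtaq_s32_f32(vdupq_n_f32(0.5))), 1);
    assert_eq!(vget_lane_s32::<0>(vcvta_s32_f32(vdup_n_f32(-0.5))), -1);
}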
7597#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7598#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
7599#[inline]
7600#[cfg_attr(test, assert_instr(fcvtas))]
7601#[target_feature(enable = "neon,fp16")]
7602#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7603pub fn vcvtah_s16_f16(a: f16) -> i16 {
7604    vcvtah_s32_f16(a) as i16
7605}
7606#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7607#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
7608#[inline]
7609#[cfg_attr(test, assert_instr(fcvtas))]
7610#[target_feature(enable = "neon,fp16")]
7611#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7612pub fn vcvtah_s32_f16(a: f16) -> i32 {
7613    unsafe extern "unadjusted" {
7614        #[cfg_attr(
7615            any(target_arch = "aarch64", target_arch = "arm64ec"),
7616            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
7617        )]
7618        fn _vcvtah_s32_f16(a: f16) -> i32;
7619    }
7620    unsafe { _vcvtah_s32_f16(a) }
7621}
7622#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7623#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
7624#[inline]
7625#[cfg_attr(test, assert_instr(fcvtas))]
7626#[target_feature(enable = "neon,fp16")]
7627#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7628pub fn vcvtah_s64_f16(a: f16) -> i64 {
7629    unsafe extern "unadjusted" {
7630        #[cfg_attr(
7631            any(target_arch = "aarch64", target_arch = "arm64ec"),
7632            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
7633        )]
7634        fn _vcvtah_s64_f16(a: f16) -> i64;
7635    }
7636    unsafe { _vcvtah_s64_f16(a) }
7637}
7638#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7639#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
7640#[inline]
7641#[cfg_attr(test, assert_instr(fcvtau))]
7642#[target_feature(enable = "neon,fp16")]
7643#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7644pub fn vcvtah_u16_f16(a: f16) -> u16 {
7645    vcvtah_u32_f16(a) as u16
7646}
7647#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7648#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
7649#[inline]
7650#[cfg_attr(test, assert_instr(fcvtau))]
7651#[target_feature(enable = "neon,fp16")]
7652#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7653pub fn vcvtah_u32_f16(a: f16) -> u32 {
7654    unsafe extern "unadjusted" {
7655        #[cfg_attr(
7656            any(target_arch = "aarch64", target_arch = "arm64ec"),
7657            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
7658        )]
7659        fn _vcvtah_u32_f16(a: f16) -> u32;
7660    }
7661    unsafe { _vcvtah_u32_f16(a) }
7662}
7663#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7664#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
7665#[inline]
7666#[cfg_attr(test, assert_instr(fcvtau))]
7667#[target_feature(enable = "neon,fp16")]
7668#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7669pub fn vcvtah_u64_f16(a: f16) -> u64 {
7670    unsafe extern "unadjusted" {
7671        #[cfg_attr(
7672            any(target_arch = "aarch64", target_arch = "arm64ec"),
7673            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
7674        )]
7675        fn _vcvtah_u64_f16(a: f16) -> u64;
7676    }
7677    unsafe { _vcvtah_u64_f16(a) }
7678}
7679#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7680#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
7681#[inline]
7682#[target_feature(enable = "neon")]
7683#[cfg_attr(test, assert_instr(fcvtas))]
7684#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7685pub fn vcvtas_s32_f32(a: f32) -> i32 {
7686    unsafe extern "unadjusted" {
7687        #[cfg_attr(
7688            any(target_arch = "aarch64", target_arch = "arm64ec"),
7689            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
7690        )]
7691        fn _vcvtas_s32_f32(a: f32) -> i32;
7692    }
7693    unsafe { _vcvtas_s32_f32(a) }
7694}
7695#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7696#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
7697#[inline]
7698#[target_feature(enable = "neon")]
7699#[cfg_attr(test, assert_instr(fcvtas))]
7700#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7701pub fn vcvtad_s64_f64(a: f64) -> i64 {
7702    unsafe extern "unadjusted" {
7703        #[cfg_attr(
7704            any(target_arch = "aarch64", target_arch = "arm64ec"),
7705            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
7706        )]
7707        fn _vcvtad_s64_f64(a: f64) -> i64;
7708    }
7709    unsafe { _vcvtad_s64_f64(a) }
7710}
7711#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7712#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
7713#[inline]
7714#[target_feature(enable = "neon")]
7715#[cfg_attr(test, assert_instr(fcvtau))]
7716#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7717pub fn vcvtas_u32_f32(a: f32) -> u32 {
7718    unsafe extern "unadjusted" {
7719        #[cfg_attr(
7720            any(target_arch = "aarch64", target_arch = "arm64ec"),
7721            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
7722        )]
7723        fn _vcvtas_u32_f32(a: f32) -> u32;
7724    }
7725    unsafe { _vcvtas_u32_f32(a) }
7726}
7727#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7728#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
7729#[inline]
7730#[target_feature(enable = "neon")]
7731#[cfg_attr(test, assert_instr(fcvtau))]
7732#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7733pub fn vcvtad_u64_f64(a: f64) -> u64 {
7734    unsafe extern "unadjusted" {
7735        #[cfg_attr(
7736            any(target_arch = "aarch64", target_arch = "arm64ec"),
7737            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
7738        )]
7739        fn _vcvtad_u64_f64(a: f64) -> u64;
7740    }
7741    unsafe { _vcvtad_u64_f64(a) }
7742}
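// Editorial usage sketch (not part of the generated output): the scalar
// ties-to-away forms behave like the vector ones, one value at a time.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvta_scalar() {
    assert_eq!(vcvtas_s32_f32(2.5), 3); // ties away from zero
    assert_eq!(vcvtas_s32_f32(-2.5), -3);
    assert_eq!(vcvtad_u64_f64(2.5), 3);
}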
7743#[doc = "Fixed-point convert to floating-point"]
7744#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
7745#[inline]
7746#[target_feature(enable = "neon")]
7747#[cfg_attr(test, assert_instr(scvtf))]
7748#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7749pub fn vcvtd_f64_s64(a: i64) -> f64 {
7750    a as f64
7751}
7752#[doc = "Fixed-point convert to floating-point"]
7753#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
7754#[inline]
7755#[target_feature(enable = "neon")]
7756#[cfg_attr(test, assert_instr(scvtf))]
7757#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7758pub fn vcvts_f32_s32(a: i32) -> f32 {
7759    a as f32
7760}
7761#[doc = "Fixed-point convert to floating-point"]
7762#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
7763#[inline]
7764#[cfg_attr(test, assert_instr(scvtf))]
7765#[target_feature(enable = "neon,fp16")]
7766#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7767pub fn vcvth_f16_s16(a: i16) -> f16 {
7768    a as f16
7769}
7770#[doc = "Fixed-point convert to floating-point"]
7771#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
7772#[inline]
7773#[cfg_attr(test, assert_instr(scvtf))]
7774#[target_feature(enable = "neon,fp16")]
7775#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7776pub fn vcvth_f16_s32(a: i32) -> f16 {
7777    a as f16
7778}
7779#[doc = "Fixed-point convert to floating-point"]
7780#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
7781#[inline]
7782#[cfg_attr(test, assert_instr(scvtf))]
7783#[target_feature(enable = "neon,fp16")]
7784#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7785pub fn vcvth_f16_s64(a: i64) -> f16 {
7786    a as f16
7787}
7788#[doc = "Unsigned fixed-point convert to floating-point"]
7789#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
7790#[inline]
7791#[cfg_attr(test, assert_instr(ucvtf))]
7792#[target_feature(enable = "neon,fp16")]
7793#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7794pub fn vcvth_f16_u16(a: u16) -> f16 {
7795    a as f16
7796}
7797#[doc = "Unsigned fixed-point convert to floating-point"]
7798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
7799#[inline]
7800#[cfg_attr(test, assert_instr(ucvtf))]
7801#[target_feature(enable = "neon,fp16")]
7802#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7803pub fn vcvth_f16_u32(a: u32) -> f16 {
7804    a as f16
7805}
7806#[doc = "Unsigned fixed-point convert to floating-point"]
7807#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
7808#[inline]
7809#[cfg_attr(test, assert_instr(ucvtf))]
7810#[target_feature(enable = "neon,fp16")]
7811#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7812pub fn vcvth_f16_u64(a: u64) -> f16 {
7813    a as f16
7814}
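// Editorial usage sketch (not part of the generated output): integer -> f16
// conversions. Small integers are exactly representable in half precision,
// so widening the result back to f32 recovers the value.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16")]
fn example_vcvth_f16() {
    assert_eq!(vcvth_f16_s32(3) as f32, 3.0);
    assert_eq!(vcvth_f16_u16(7) as f32, 7.0);
}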
7815#[doc = "Fixed-point convert to floating-point"]
7816#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
7817#[inline]
7818#[cfg_attr(test, assert_instr(scvtf, N = 2))]
7819#[rustc_legacy_const_generics(1)]
7820#[target_feature(enable = "neon,fp16")]
7821#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7822pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
7823    static_assert!(N >= 1 && N <= 16);
7824    vcvth_n_f16_s32::<N>(a as i32)
7825}
7826#[doc = "Fixed-point convert to floating-point"]
7827#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
7828#[inline]
7829#[cfg_attr(test, assert_instr(scvtf, N = 2))]
7830#[rustc_legacy_const_generics(1)]
7831#[target_feature(enable = "neon,fp16")]
7832#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7833pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
7834    static_assert!(N >= 1 && N <= 16);
7835    unsafe extern "unadjusted" {
7836        #[cfg_attr(
7837            any(target_arch = "aarch64", target_arch = "arm64ec"),
7838            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
7839        )]
7840        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
7841    }
7842    unsafe { _vcvth_n_f16_s32(a, N) }
7843}
7844#[doc = "Fixed-point convert to floating-point"]
7845#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
7846#[inline]
7847#[cfg_attr(test, assert_instr(scvtf, N = 2))]
7848#[rustc_legacy_const_generics(1)]
7849#[target_feature(enable = "neon,fp16")]
7850#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7851pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
7852    static_assert!(N >= 1 && N <= 16);
7853    unsafe extern "unadjusted" {
7854        #[cfg_attr(
7855            any(target_arch = "aarch64", target_arch = "arm64ec"),
7856            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
7857        )]
7858        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
7859    }
7860    unsafe { _vcvth_n_f16_s64(a, N) }
7861}
7862#[doc = "Fixed-point convert to floating-point"]
7863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
7864#[inline]
7865#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
7866#[rustc_legacy_const_generics(1)]
7867#[target_feature(enable = "neon,fp16")]
7868#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7869pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
7870    static_assert!(N >= 1 && N <= 16);
7871    vcvth_n_f16_u32::<N>(a as u32)
7872}
7873#[doc = "Fixed-point convert to floating-point"]
7874#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
7875#[inline]
7876#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
7877#[rustc_legacy_const_generics(1)]
7878#[target_feature(enable = "neon,fp16")]
7879#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7880pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
7881    static_assert!(N >= 1 && N <= 16);
7882    unsafe extern "unadjusted" {
7883        #[cfg_attr(
7884            any(target_arch = "aarch64", target_arch = "arm64ec"),
7885            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
7886        )]
7887        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
7888    }
7889    unsafe { _vcvth_n_f16_u32(a, N) }
7890}
7891#[doc = "Fixed-point convert to floating-point"]
7892#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
7893#[inline]
7894#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
7895#[rustc_legacy_const_generics(1)]
7896#[target_feature(enable = "neon,fp16")]
7897#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7898pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
7899    static_assert!(N >= 1 && N <= 16);
7900    unsafe extern "unadjusted" {
7901        #[cfg_attr(
7902            any(target_arch = "aarch64", target_arch = "arm64ec"),
7903            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
7904        )]
7905        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
7906    }
7907    unsafe { _vcvth_n_f16_u64(a, N) }
7908}
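// Editorial usage sketch (not part of the generated output): as with the
// vector `_n_` forms, the integer is divided by 2^N, so 3 with N = 1
// converts to 1.5.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16")]
fn example_vcvth_n_f16_s32() {
    assert_eq!(vcvth_n_f16_s32::<1>(3) as f32, 1.5);
}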
7909#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
7911#[inline]
7912#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
7913#[rustc_legacy_const_generics(1)]
7914#[target_feature(enable = "neon,fp16")]
7915#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7916pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
7917    static_assert!(N >= 1 && N <= 16);
7918    vcvth_n_s32_f16::<N>(a) as i16
7919}
7920#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7921#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
7922#[inline]
7923#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
7924#[rustc_legacy_const_generics(1)]
7925#[target_feature(enable = "neon,fp16")]
7926#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7927pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
7928    static_assert!(N >= 1 && N <= 16);
7929    unsafe extern "unadjusted" {
7930        #[cfg_attr(
7931            any(target_arch = "aarch64", target_arch = "arm64ec"),
7932            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
7933        )]
7934        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
7935    }
7936    unsafe { _vcvth_n_s32_f16(a, N) }
7937}
7938#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
7940#[inline]
7941#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
7942#[rustc_legacy_const_generics(1)]
7943#[target_feature(enable = "neon,fp16")]
7944#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7945pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
7946    static_assert!(N >= 1 && N <= 16);
7947    unsafe extern "unadjusted" {
7948        #[cfg_attr(
7949            any(target_arch = "aarch64", target_arch = "arm64ec"),
7950            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
7951        )]
7952        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
7953    }
7954    unsafe { _vcvth_n_s64_f16(a, N) }
7955}
7956#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7957#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
7958#[inline]
7959#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
7960#[rustc_legacy_const_generics(1)]
7961#[target_feature(enable = "neon,fp16")]
7962#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7963pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
7964    static_assert!(N >= 1 && N <= 16);
7965    vcvth_n_u32_f16::<N>(a) as u16
7966}
7967#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7968#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
7969#[inline]
7970#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
7971#[rustc_legacy_const_generics(1)]
7972#[target_feature(enable = "neon,fp16")]
7973#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7974pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
7975    static_assert!(N >= 1 && N <= 16);
7976    unsafe extern "unadjusted" {
7977        #[cfg_attr(
7978            any(target_arch = "aarch64", target_arch = "arm64ec"),
7979            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
7980        )]
7981        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
7982    }
7983    unsafe { _vcvth_n_u32_f16(a, N) }
7984}
7985#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
7986#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
7987#[inline]
7988#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
7989#[rustc_legacy_const_generics(1)]
7990#[target_feature(enable = "neon,fp16")]
7991#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
7992pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
7993    static_assert!(N >= 1 && N <= 16);
7994    unsafe extern "unadjusted" {
7995        #[cfg_attr(
7996            any(target_arch = "aarch64", target_arch = "arm64ec"),
7997            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
7998        )]
7999        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
8000    }
8001    unsafe { _vcvth_n_u64_f16(a, N) }
8002}
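// Editorial usage sketch (not part of the generated output): the f16 input
// is scaled by 2^N and truncated toward zero; 1.5 is exactly representable
// in f16, and the `f32 as f16` cast assumes the unstable `f16` type enabled
// in this crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16")]
fn example_vcvth_n_s32_f16() {
    let h: f16 = 1.5f32 as f16;
    assert_eq!(vcvth_n_s32_f16::<1>(h), 3);
}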
8003#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
8004#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
8005#[inline]
8006#[cfg_attr(test, assert_instr(fcvtzs))]
8007#[target_feature(enable = "neon,fp16")]
8008#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8009pub fn vcvth_s16_f16(a: f16) -> i16 {
8010    a as i16
8011}
8012#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
8013#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
8014#[inline]
8015#[cfg_attr(test, assert_instr(fcvtzs))]
8016#[target_feature(enable = "neon,fp16")]
8017#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8018pub fn vcvth_s32_f16(a: f16) -> i32 {
8019    a as i32
8020}
8021#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
8022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
8023#[inline]
8024#[cfg_attr(test, assert_instr(fcvtzs))]
8025#[target_feature(enable = "neon,fp16")]
8026#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8027pub fn vcvth_s64_f16(a: f16) -> i64 {
8028    a as i64
8029}
8030#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
8031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
8032#[inline]
8033#[cfg_attr(test, assert_instr(fcvtzu))]
8034#[target_feature(enable = "neon,fp16")]
8035#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8036pub fn vcvth_u16_f16(a: f16) -> u16 {
8037    a as u16
8038}
8039#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
8040#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
8041#[inline]
8042#[cfg_attr(test, assert_instr(fcvtzu))]
8043#[target_feature(enable = "neon,fp16")]
8044#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8045pub fn vcvth_u32_f16(a: f16) -> u32 {
8046    a as u32
8047}
8048#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
8049#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
8050#[inline]
8051#[cfg_attr(test, assert_instr(fcvtzu))]
8052#[target_feature(enable = "neon,fp16")]
8053#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8054pub fn vcvth_u64_f16(a: f16) -> u64 {
8055    a as u64
8056}
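// Editorial usage sketch (not part of the generated output): these `as`
// casts truncate toward zero, matching FCVTZS/FCVTZU; -2.75 is exactly
// representable in f16. The `f32 as f16` cast assumes the unstable `f16`
// type enabled in this crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,fp16")]
fn example_vcvth_s32_f16() {
    let h: f16 = (-2.75f32) as f16;
    assert_eq!(vcvth_s32_f16(h), -2);
}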
8057#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8058#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
8059#[inline]
8060#[cfg_attr(test, assert_instr(fcvtms))]
8061#[target_feature(enable = "neon,fp16")]
8062#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8063pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
8064    unsafe extern "unadjusted" {
8065        #[cfg_attr(
8066            any(target_arch = "aarch64", target_arch = "arm64ec"),
8067            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
8068        )]
8069        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
8070    }
8071    unsafe { _vcvtm_s16_f16(a) }
8072}
8073#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8074#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
8075#[inline]
8076#[cfg_attr(test, assert_instr(fcvtms))]
8077#[target_feature(enable = "neon,fp16")]
8078#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8079pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
8080    unsafe extern "unadjusted" {
8081        #[cfg_attr(
8082            any(target_arch = "aarch64", target_arch = "arm64ec"),
8083            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
8084        )]
8085        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
8086    }
8087    unsafe { _vcvtmq_s16_f16(a) }
8088}
8089#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8090#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
8091#[inline]
8092#[target_feature(enable = "neon")]
8093#[cfg_attr(test, assert_instr(fcvtms))]
8094#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8095pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
8096    unsafe extern "unadjusted" {
8097        #[cfg_attr(
8098            any(target_arch = "aarch64", target_arch = "arm64ec"),
8099            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
8100        )]
8101        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
8102    }
8103    unsafe { _vcvtm_s32_f32(a) }
8104}
8105#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
8107#[inline]
8108#[target_feature(enable = "neon")]
8109#[cfg_attr(test, assert_instr(fcvtms))]
8110#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8111pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
8112    unsafe extern "unadjusted" {
8113        #[cfg_attr(
8114            any(target_arch = "aarch64", target_arch = "arm64ec"),
8115            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
8116        )]
8117        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
8118    }
8119    unsafe { _vcvtmq_s32_f32(a) }
8120}
8121#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
8123#[inline]
8124#[target_feature(enable = "neon")]
8125#[cfg_attr(test, assert_instr(fcvtms))]
8126#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8127pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
8128    unsafe extern "unadjusted" {
8129        #[cfg_attr(
8130            any(target_arch = "aarch64", target_arch = "arm64ec"),
8131            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
8132        )]
8133        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
8134    }
8135    unsafe { _vcvtm_s64_f64(a) }
8136}
8137#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
8139#[inline]
8140#[target_feature(enable = "neon")]
8141#[cfg_attr(test, assert_instr(fcvtms))]
8142#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8143pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
8144    unsafe extern "unadjusted" {
8145        #[cfg_attr(
8146            any(target_arch = "aarch64", target_arch = "arm64ec"),
8147            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
8148        )]
8149        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
8150    }
8151    unsafe { _vcvtmq_s64_f64(a) }
8152}
8153#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8154#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
8155#[inline]
8156#[cfg_attr(test, assert_instr(fcvtmu))]
8157#[target_feature(enable = "neon,fp16")]
8158#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8159pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
8160    unsafe extern "unadjusted" {
8161        #[cfg_attr(
8162            any(target_arch = "aarch64", target_arch = "arm64ec"),
8163            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
8164        )]
8165        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
8166    }
8167    unsafe { _vcvtm_u16_f16(a) }
8168}
8169#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8170#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
8171#[inline]
8172#[cfg_attr(test, assert_instr(fcvtmu))]
8173#[target_feature(enable = "neon,fp16")]
8174#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8175pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
8176    unsafe extern "unadjusted" {
8177        #[cfg_attr(
8178            any(target_arch = "aarch64", target_arch = "arm64ec"),
8179            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
8180        )]
8181        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
8182    }
8183    unsafe { _vcvtmq_u16_f16(a) }
8184}
8185#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8186#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
8187#[inline]
8188#[target_feature(enable = "neon")]
8189#[cfg_attr(test, assert_instr(fcvtmu))]
8190#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8191pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
8192    unsafe extern "unadjusted" {
8193        #[cfg_attr(
8194            any(target_arch = "aarch64", target_arch = "arm64ec"),
8195            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
8196        )]
8197        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
8198    }
8199    unsafe { _vcvtm_u32_f32(a) }
8200}
8201#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8202#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
8203#[inline]
8204#[target_feature(enable = "neon")]
8205#[cfg_attr(test, assert_instr(fcvtmu))]
8206#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8207pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
8208    unsafe extern "unadjusted" {
8209        #[cfg_attr(
8210            any(target_arch = "aarch64", target_arch = "arm64ec"),
8211            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
8212        )]
8213        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
8214    }
8215    unsafe { _vcvtmq_u32_f32(a) }
8216}
8217#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8218#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
8219#[inline]
8220#[target_feature(enable = "neon")]
8221#[cfg_attr(test, assert_instr(fcvtmu))]
8222#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8223pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
8224    unsafe extern "unadjusted" {
8225        #[cfg_attr(
8226            any(target_arch = "aarch64", target_arch = "arm64ec"),
8227            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
8228        )]
8229        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
8230    }
8231    unsafe { _vcvtm_u64_f64(a) }
8232}
8233#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8234#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
8235#[inline]
8236#[target_feature(enable = "neon")]
8237#[cfg_attr(test, assert_instr(fcvtmu))]
8238#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8239pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
8240    unsafe extern "unadjusted" {
8241        #[cfg_attr(
8242            any(target_arch = "aarch64", target_arch = "arm64ec"),
8243            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
8244        )]
8245        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
8246    }
8247    unsafe { _vcvtmq_u64_f64(a) }
8248}
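// Editorial usage sketch (not part of the generated output): rounding toward
// minus infinity (flooring) differs from truncation for negative inputs:
// -0.5 floors to -1, where truncation would give 0.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vcvtm() {
    assert_eq!(vget_lane_s32::<0>(vcvtm_s32_f32(vdup_n_f32(-0.5))), -1);
    assert_eq!(vgetq_lane_u32::<0>(vcvtmq_u32_f32(vdupq_n_f32(1.9))), 1);
}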
8249#[doc = "Floating-point convert to integer, rounding toward minus infinity"]
8250#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
8251#[inline]
8252#[cfg_attr(test, assert_instr(fcvtms))]
8253#[target_feature(enable = "neon,fp16")]
8254#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8255pub fn vcvtmh_s16_f16(a: f16) -> i16 {
8256    vcvtmh_s32_f16(a) as i16
8257}
8258#[doc = "Floating-point convert to integer, rounding toward minus infinity"]
8259#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
8260#[inline]
8261#[cfg_attr(test, assert_instr(fcvtms))]
8262#[target_feature(enable = "neon,fp16")]
8263#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8264pub fn vcvtmh_s32_f16(a: f16) -> i32 {
8265    unsafe extern "unadjusted" {
8266        #[cfg_attr(
8267            any(target_arch = "aarch64", target_arch = "arm64ec"),
8268            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
8269        )]
8270        fn _vcvtmh_s32_f16(a: f16) -> i32;
8271    }
8272    unsafe { _vcvtmh_s32_f16(a) }
8273}
8274#[doc = "Floating-point convert to integer, rounding toward minus infinity"]
8275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
8276#[inline]
8277#[cfg_attr(test, assert_instr(fcvtms))]
8278#[target_feature(enable = "neon,fp16")]
8279#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8280pub fn vcvtmh_s64_f16(a: f16) -> i64 {
8281    unsafe extern "unadjusted" {
8282        #[cfg_attr(
8283            any(target_arch = "aarch64", target_arch = "arm64ec"),
8284            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
8285        )]
8286        fn _vcvtmh_s64_f16(a: f16) -> i64;
8287    }
8288    unsafe { _vcvtmh_s64_f16(a) }
8289}
8290#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8291#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
8292#[inline]
8293#[cfg_attr(test, assert_instr(fcvtmu))]
8294#[target_feature(enable = "neon,fp16")]
8295#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8296pub fn vcvtmh_u16_f16(a: f16) -> u16 {
8297    vcvtmh_u32_f16(a) as u16
8298}
8299#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8300#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
8301#[inline]
8302#[cfg_attr(test, assert_instr(fcvtmu))]
8303#[target_feature(enable = "neon,fp16")]
8304#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8305pub fn vcvtmh_u32_f16(a: f16) -> u32 {
8306    unsafe extern "unadjusted" {
8307        #[cfg_attr(
8308            any(target_arch = "aarch64", target_arch = "arm64ec"),
8309            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
8310        )]
8311        fn _vcvtmh_u32_f16(a: f16) -> u32;
8312    }
8313    unsafe { _vcvtmh_u32_f16(a) }
8314}
8315#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8316#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
8317#[inline]
8318#[cfg_attr(test, assert_instr(fcvtmu))]
8319#[target_feature(enable = "neon,fp16")]
8320#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
8321pub fn vcvtmh_u64_f16(a: f16) -> u64 {
8322    unsafe extern "unadjusted" {
8323        #[cfg_attr(
8324            any(target_arch = "aarch64", target_arch = "arm64ec"),
8325            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
8326        )]
8327        fn _vcvtmh_u64_f16(a: f16) -> u64;
8328    }
8329    unsafe { _vcvtmh_u64_f16(a) }
8330}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtnd_u64_f64(a) }
}
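// Usage sketch (illustrative only): FCVTNS/FCVTNU round to nearest with ties
// to even, which differs from Rust's `as` casts (truncate toward zero) and
// from `round` (ties away from zero):
//
//     assert_eq!(vcvtns_s32_f32(0.5), 0);   // tie resolves to even
//     assert_eq!(vcvtns_s32_f32(1.5), 2);   // tie resolves to even
//     assert_eq!(vcvtnd_s64_f64(-2.5), -2); // likewise for f64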
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    unsafe { _vcvtph_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtpd_u64_f64(a) }
}
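// Usage sketch (illustrative only): the `vcvtp*` family rounds toward plus
// infinity (a ceiling) before converting:
//
//     assert_eq!(vcvtps_s32_f32(1.1), 2);
//     assert_eq!(vcvtps_s32_f32(-1.9), -1);
//     assert_eq!(vcvtpd_u64_f64(0.1), 1);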
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
        )]
        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
    }
    unsafe { _vcvts_n_f32_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
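// Usage sketch (illustrative only): the `_n_` integer-to-float converts treat
// the input as a fixed-point number with `N` fractional bits, so the result
// is the input divided by 2^N:
//
//     assert_eq!(vcvts_n_f32_s32::<2>(6), 1.5);   // 6 / 2^2
//     assert_eq!(vcvtd_n_f64_u64::<8>(256), 1.0); // 256 / 2^8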
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    unsafe { _vcvts_n_s32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
        )]
        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
    }
    unsafe { _vcvtd_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
        )]
        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
    }
    unsafe { _vcvts_n_u32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
        )]
        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
    }
    unsafe { _vcvtd_n_u64_f64(a, N) }
}
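// Usage sketch (illustrative only): the float-to-fixed-point converts scale
// by 2^N and then round toward zero, saturating on overflow:
//
//     assert_eq!(vcvts_n_s32_f32::<2>(1.5), 6);   // 1.5 * 2^2
//     assert_eq!(vcvts_n_u32_f32::<4>(0.75), 12); // 0.75 * 2^4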
#[doc = "Floating-point convert to signed integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_s32_f32(a: f32) -> i32 {
    a as i32
}
#[doc = "Floating-point convert to signed integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_s64_f64(a: f64) -> i64 {
    a as i64
}
#[doc = "Floating-point convert to unsigned integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_u32_f32(a: f32) -> u32 {
    a as u32
}
#[doc = "Floating-point convert to unsigned integer, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_u64_f64(a: f64) -> u64 {
    a as u64
}
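// Note (sketch): the four intrinsics above are plain Rust `as` casts. Rust
// float-to-integer casts saturate and map NaN to zero, which matches the
// FCVTZS/FCVTZU semantics exactly, so LLVM lowers them to those single
// instructions:
//
//     assert_eq!(vcvts_s32_f32(f32::INFINITY), i32::MAX); // saturating
//     assert_eq!(vcvts_u32_f32(-1.0), 0);                 // clamped to 0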
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
        )]
        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
    }
    unsafe { _vcvtx_f32_f64(a) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtxd_f32_f64(a: f64) -> f32 {
    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
}
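// Note (sketch): FCVTXN rounds to odd, which exists to make two-step
// narrowing safe: an f64 -> f32 -> f16 chain that goes through
// `vcvtx_f32_f64` produces the same result as a single correctly rounded
// f64 -> f16 conversion, avoiding double-rounding errors. For example
// (values illustrative):
//
//     let wide = vdupq_n_f64(1.0 + f64::EPSILON); // inexact in f32
//     let narrow = vcvtx_f32_f64(wide);           // mantissa forced odd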
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vdivh_f16(a: f16, b: f16) -> f16 {
    a / b
}
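// Note (sketch): unlike the integer NEON operations, AArch64 provides a true
// vector FDIV, so the `vdiv*` intrinsics above each lower to a single
// instruction; the scalar `vdivh_f16` is ordinary Rust division on `f16`:
//
//     let q = vdivq_f32(vdupq_n_f32(1.0), vdupq_n_f32(4.0)); // all lanes 0.25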
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vdot_s32(a, b, transmute(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vdotq_s32(a, b, transmute(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vdot_u32(a, b, transmute(c))
    }
}
#[doc = "Dot product arithmetic (indexed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon,dotprod")]
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")]
pub fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vdotq_u32(a, b, transmute(c))
    }
}
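// Usage sketch (illustrative; `acc`, `b`, and `c` are hypothetical inputs):
// each 32-bit lane of the result accumulates a four-way dot product of 8-bit
// elements, and `LANE` selects which aligned group of four bytes of `c` is
// broadcast to every lane:
//
//     // r[i] = acc[i] + sum of b[4*i + j] * c[4*LANE + j] for j in 0..4
//     let r = vdotq_laneq_s32::<1>(acc, b, c);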
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
9571#[doc = "Set all vector lanes to the same value"]
9572#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
9573#[inline]
9574#[target_feature(enable = "neon")]
9575#[cfg_attr(test, assert_instr(nop, N = 0))]
9576#[rustc_legacy_const_generics(1)]
9577#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9578pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
9579    static_assert!(N == 0);
9580    unsafe { simd_extract!(a, N as u32) }
9581}
9582#[doc = "Set all vector lanes to the same value"]
9583#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
9584#[inline]
9585#[target_feature(enable = "neon")]
9586#[cfg_attr(test, assert_instr(nop, N = 0))]
9587#[rustc_legacy_const_generics(1)]
9588#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9589pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
9590    static_assert!(N == 0);
9591    unsafe { simd_extract!(a, N as u32) }
9592}
9593#[doc = "Set all vector lanes to the same value"]
9594#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
9595#[inline]
9596#[target_feature(enable = "neon")]
9597#[cfg_attr(test, assert_instr(nop, N = 0))]
9598#[rustc_legacy_const_generics(1)]
9599#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9600pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
9601    static_assert!(N == 0);
9602    unsafe { simd_extract!(a, N as u32) }
9603}
9604#[doc = "Set all vector lanes to the same value"]
9605#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
9606#[inline]
9607#[cfg_attr(test, assert_instr(nop, N = 2))]
9608#[rustc_legacy_const_generics(1)]
9609#[target_feature(enable = "neon,fp16")]
9610#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9611pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
9612    static_assert_uimm_bits!(N, 2);
9613    unsafe { simd_extract!(a, N as u32) }
9614}
9615#[doc = "Extract an element from a vector"]
9616#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
9617#[inline]
9618#[cfg_attr(test, assert_instr(nop, N = 4))]
9619#[rustc_legacy_const_generics(1)]
9620#[target_feature(enable = "neon,fp16")]
9621#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9622pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
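// Usage sketch (illustrative, not part of the generated intrinsics): how the
// vector `vdupq_laneq_*` broadcasts above might be called from user code via
// `std::arch`. The function name and values are hypothetical; assumes an
// AArch64 target with NEON detected at runtime.
//
// ```
// use std::arch::aarch64::{vdupq_laneq_f64, vgetq_lane_f64, vld1q_f64};
//
// fn broadcast_high_lane(v: &[f64; 2]) -> f64 {
//     assert!(std::arch::is_aarch64_feature_detected!("neon"));
//     // SAFETY: NEON support was verified at runtime just above.
//     unsafe {
//         let v = vld1q_f64(v.as_ptr());
//         // Replicate lane 1 into both lanes, then read lane 0 back out.
//         vgetq_lane_f64::<0>(vdupq_laneq_f64::<1>(v))
//     }
// }
// ```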
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
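// Usage sketch (illustrative, not part of the generated intrinsics): the
// scalar `vdup{b,h,s,d}_lane*` forms above lower to a plain lane read (hence
// the `nop` assertions), so they behave like `vget_lane_*`. Hypothetical
// example; assumes NEON is detected at runtime.
//
// ```
// use std::arch::aarch64::{vdup_n_u16, vduph_lane_u16};
//
// fn read_lane_two() -> u16 {
//     assert!(std::arch::is_aarch64_feature_detected!("neon"));
//     // SAFETY: NEON support was verified at runtime just above.
//     unsafe { vduph_lane_u16::<2>(vdup_n_u16(7)) } // every lane holds 7
// }
// ```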
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _veor3q_s8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _veor3q_u64(a, b, c) }
}
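// Usage sketch (illustrative, not part of the generated intrinsics):
// `veor3q_*` computes `a ^ b ^ c` with a single EOR3 instruction (FEAT_SHA3).
// Hypothetical scalar cross-check; assumes the `sha3` feature is detected.
//
// ```
// use std::arch::aarch64::{vdupq_n_u8, veor3q_u8, vgetq_lane_u8};
//
// fn xor3_demo() {
//     assert!(std::arch::is_aarch64_feature_detected!("sha3"));
//     // SAFETY: the `sha3` feature (which implies NEON) was verified above.
//     unsafe {
//         let r = veor3q_u8(vdupq_n_u8(0xF0), vdupq_n_u8(0x3C), vdupq_n_u8(0x0F));
//         assert_eq!(vgetq_lane_u8::<0>(r), 0xF0 ^ 0x3C ^ 0x0F); // 0xC3
//     }
// }
// ```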
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
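// Usage sketch (illustrative, not part of the generated intrinsics): for the
// two-lane `vextq_{f64,p64}` forms above, `N = 1` concatenates the high lane
// of `a` with the low lane of `b`. Hypothetical example; assumes NEON.
//
// ```
// use std::arch::aarch64::{vextq_f64, vgetq_lane_f64, vld1q_f64};
//
// fn ext_demo() {
//     assert!(std::arch::is_aarch64_feature_detected!("neon"));
//     // SAFETY: NEON support was verified at runtime just above.
//     unsafe {
//         let a = vld1q_f64([1.0, 2.0].as_ptr());
//         let b = vld1q_f64([3.0, 4.0].as_ptr());
//         let r = vextq_f64::<1>(a, b); // lanes: [a[1], b[0]] == [2.0, 3.0]
//         assert_eq!(vgetq_lane_f64::<0>(r), 2.0);
//         assert_eq!(vgetq_lane_f64::<1>(r), 3.0);
//     }
// }
// ```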
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    vfma_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    vfmaq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    vfma_f64(a, b, vdup_n_f64(c))
}
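// Usage sketch (illustrative, not part of the generated intrinsics): every
// `vfma*_lane*`/`vfma*_n_*` form above broadcasts one multiplier and performs
// `a + b * c` with a single rounding. Hypothetical example; assumes NEON.
//
// ```
// use std::arch::aarch64::{vdupq_n_f32, vfmaq_laneq_f32, vgetq_lane_f32, vld1q_f32};
//
// fn fma_lane_demo() -> f32 {
//     assert!(std::arch::is_aarch64_feature_detected!("neon"));
//     // SAFETY: NEON support was verified at runtime just above.
//     unsafe {
//         let a = vdupq_n_f32(1.0);
//         let b = vdupq_n_f32(2.0);
//         let c = vld1q_f32([10.0, 20.0, 30.0, 40.0].as_ptr());
//         // Each lane computes 1.0 + 2.0 * c[3] == 81.0.
//         vgetq_lane_f32::<0>(vfmaq_laneq_f32::<3>(a, b, c))
//     }
// }
// ```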
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    unsafe { fmaf16(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
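// Usage sketch (illustrative, not part of the generated intrinsics): the
// scalar `vfma{h,s,d}_lane*` forms compute one fused `a + b * c[LANE]` via
// FMADD. Hypothetical example; assumes NEON is detected at runtime.
//
// ```
// use std::arch::aarch64::{vfmas_lane_f32, vld1_f32};
//
// fn scalar_fma_demo() -> f32 {
//     assert!(std::arch::is_aarch64_feature_detected!("neon"));
//     // SAFETY: NEON support was verified at runtime just above.
//     // 1.0 + 2.0 * 6.0 == 13.0
//     unsafe { vfmas_lane_f32::<1>(1.0, 2.0, vld1_f32([5.0, 6.0].as_ptr())) }
// }
// ```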
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
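// Usage sketch (illustrative, not part of the generated intrinsics): the
// `vfmlal{,q}_{low,high}_f16` family widens f16 products into f32
// accumulators; `low` consumes the lower half of the f16 lanes, `high` the
// upper half. Hypothetical example; note that the `f16` type and these
// intrinsics are still nightly-only, and FEAT_FHM must be detected.
//
// ```
// #![feature(f16, stdarch_neon_f16)]
// use std::arch::aarch64::{vdup_n_f16, vdup_n_f32, vfmlal_low_f16, vget_lane_f32};
//
// fn fmlal_demo() -> f32 {
//     assert!(std::arch::is_aarch64_feature_detected!("fhm"));
//     // SAFETY: FEAT_FHM support was verified at runtime just above.
//     unsafe {
//         let r = vfmlal_low_f16(vdup_n_f32(1.0), vdup_n_f16(2.0), vdup_n_f16(3.0));
//         vget_lane_f32::<0>(r) // 1.0 + 2.0 * 3.0 == 7.0
//     }
// }
// ```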
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
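// Usage sketch (illustrative, not part of the generated intrinsics):
// `vfmlsl*` mirrors `vfmlal*` with the widened product subtracted, i.e.
// `r - widen(a) * widen(b)` over the selected f16 half. Same assumptions as
// the `vfmlal` sketch above (nightly-only f16 support, FEAT_FHM detected).
//
// ```
// #![feature(f16, stdarch_neon_f16)]
// use std::arch::aarch64::{vdup_n_f16, vdup_n_f32, vfmlsl_low_f16, vget_lane_f32};
//
// fn fmlsl_demo() -> f32 {
//     assert!(std::arch::is_aarch64_feature_detected!("fhm"));
//     // SAFETY: FEAT_FHM support was verified at runtime just above.
//     unsafe {
//         let r = vfmlsl_low_f16(vdup_n_f32(10.0), vdup_n_f16(2.0), vdup_n_f16(3.0));
//         vget_lane_f32::<0>(r) // 10.0 - 2.0 * 3.0 == 4.0
//     }
// }
// ```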
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe {
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
10755#[doc = "Floating-point fused multiply-subtract to accumulator"]
10756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
10757#[inline]
10758#[target_feature(enable = "neon")]
10759#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
10760#[rustc_legacy_const_generics(3)]
10761#[stable(feature = "neon_intrinsics", since = "1.59.0")]
10762pub fn vfms_lane_f32<const LANE: i32>(
10763    a: float32x2_t,
10764    b: float32x2_t,
10765    c: float32x2_t,
10766) -> float32x2_t {
10767    static_assert_uimm_bits!(LANE, 1);
10768    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
10769}
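// Editor's note: an illustrative sketch, not generated output. The `LANE`
// const parameter of the `_lane`/`_laneq` variants selects one element of `c`,
// which is broadcast across all lanes before the fused multiply-subtract, so
// `vfms_lane_f32::<1>(a, b, c)` computes `a[i] - b[i] * c[1]` for every lane
// `i`. `vset_lane_f32` and `vget_lane_f32` are assumed in scope here.
#[cfg(all(test, target_arch = "aarch64"))]
mod vfms_lane_f32_example {
    use super::*;

    #[test]
    fn lane_is_broadcast_before_the_multiply() {
        unsafe {
            let a = vdup_n_f32(10.0);
            let b = vdup_n_f32(2.0);
            // c = [100.0, 3.0]; lane 1 (3.0) is the multiplier.
            let c = vset_lane_f32::<1>(3.0, vdup_n_f32(100.0));
            let r = vfms_lane_f32::<1>(a, b, c);
            // Both lanes hold 10.0 - 2.0 * 3.0 == 4.0.
            assert_eq!(vget_lane_f32::<0>(r), 4.0);
            assert_eq!(vget_lane_f32::<1>(r), 4.0);
        }
    }
}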
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    vfms_f64(a, b, vdup_n_f64(c))
}
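// Editor's note: an illustrative sketch, not generated output. The `_n`
// variants take a plain scalar that is duplicated into every lane before the
// fused multiply-subtract, which is convenient when the multiplier is a
// runtime constant rather than a vector lane.
#[cfg(all(test, target_arch = "aarch64"))]
mod vfms_n_f64_example {
    use super::*;

    #[test]
    fn scalar_multiplier_is_duplicated() {
        unsafe {
            let a = vdup_n_f64(1.0);
            let b = vdup_n_f64(4.0);
            // 1.0 - 4.0 * 0.25 == 0.0.
            let r = vfms_n_f64(a, b, 0.25);
            assert_eq!(vget_lane_f64::<0>(r), 0.0);
        }
    }
}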
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    vfmah_f16(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe {
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
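// Editor's note: an illustrative sketch, not generated output. The `vfmss_*`
// intrinsics are the fully scalar form: plain `f32` accumulator and first
// multiplicand, with the second multiplicand taken from a vector lane selected
// by `LANE`. `vset_lane_f32` and `vdup_n_f32` are assumed in scope.
#[cfg(all(test, target_arch = "aarch64"))]
mod vfmss_lane_f32_example {
    use super::*;

    #[test]
    fn scalar_fused_multiply_subtract() {
        unsafe {
            // c = [0.0, 5.0]; lane 1 supplies the multiplier.
            let c = vset_lane_f32::<1>(5.0, vdup_n_f32(0.0));
            // 20.0 - 3.0 * c[1] == 20.0 - 15.0 == 5.0.
            assert_eq!(vfmss_lane_f32::<1>(20.0, 3.0, c), 5.0);
        }
    }
}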
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
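// Editor's note: an illustrative sketch, not generated output. The `vld1*`
// intrinsics above are implemented as unaligned reads: the pointer must be
// valid for reading the full vector width, but no extra alignment is required.
// `vgetq_lane_f32` is assumed in scope via the glob import.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld1q_f32_example {
    use super::*;

    #[test]
    fn loads_four_consecutive_floats() {
        let data: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
        unsafe {
            let v = vld1q_f32(data.as_ptr());
            assert_eq!(vgetq_lane_f32::<0>(v), 1.0);
            assert_eq!(vgetq_lane_f32::<3>(v), 4.0);
        }
    }
}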
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0"
        )]
        fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t;
    }
    _vld1_f64_x2(a)
}
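// Editor's note: an illustrative sketch, not generated output. The `_x2`,
// `_x3`, and `_x4` variants load that many consecutive vectors from a single
// pointer and return them as a register tuple (`float64x1x2_t` here),
// lowering to a single LD1 with multiple destination registers.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld1_f64_x2_example {
    use super::*;

    #[test]
    fn loads_two_consecutive_vectors() {
        let data: [f64; 2] = [1.5, 2.5];
        unsafe {
            let pair = vld1_f64_x2(data.as_ptr());
            assert_eq!(vget_lane_f64::<0>(pair.0), 1.5);
            assert_eq!(vget_lane_f64::<0>(pair.1), 2.5);
        }
    }
}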
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0"
        )]
        fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t;
    }
    _vld1_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0"
        )]
        fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t;
    }
    _vld1_f64_x4(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0"
        )]
        fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t;
    }
    _vld1q_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0"
        )]
        fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t;
    }
    _vld1q_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0"
        )]
        fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t;
    }
    _vld1q_f64_x4(a)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
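// Editor's note: an illustrative sketch, not generated output. LD2R reads a
// single 2-element structure from memory and replicates element 0 into every
// lane of the first result vector and element 1 into every lane of the second.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld2_dup_f64_example {
    use super::*;

    #[test]
    fn replicates_one_structure() {
        let data: [f64; 2] = [7.0, 8.0];
        unsafe {
            let s = vld2_dup_f64(data.as_ptr());
            assert_eq!(vget_lane_f64::<0>(s.0), 7.0);
            assert_eq!(vget_lane_f64::<0>(s.1), 8.0);
        }
    }
}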
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v1f64.p0"
        )]
        fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t;
    }
    _vld2_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
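// Editor's note (not generated output): on big-endian targets the vectors
// returned by the underlying LLVM intrinsic carry their lanes in the opposite
// order, so the `#[cfg(target_endian = "big")]` wrappers above and below
// restore the architecture-independent lane numbering by reversing each
// result vector with `simd_shuffle!`.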
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
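// Editor's note: an illustrative sketch, not generated output. LD2
// deinterleaves: from memory laid out as [x0, y0, x1, y1] it gathers the
// even-indexed elements into the first result vector and the odd-indexed
// elements into the second.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld2q_f64_example {
    use super::*;

    #[test]
    fn deinterleaves_pairs() {
        let data: [f64; 4] = [1.0, 10.0, 2.0, 20.0];
        unsafe {
            let s = vld2q_f64(data.as_ptr());
            // s.0 collects elements 0 and 2; s.1 collects elements 1 and 3.
            assert_eq!(vgetq_lane_f64::<0>(s.0), 1.0);
            assert_eq!(vgetq_lane_f64::<1>(s.0), 2.0);
            assert_eq!(vgetq_lane_f64::<0>(s.1), 10.0);
            assert_eq!(vgetq_lane_f64::<1>(s.1), 20.0);
        }
    }
}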
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
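// Editor's note: an illustrative sketch, not generated output. The `_lane`
// structured loads read a single 2-element structure from memory and insert
// its elements into lane `LANE` of the two input vectors, leaving every other
// lane untouched. Constructing `int8x16x2_t` directly as a tuple struct is an
// assumption about how these multi-vector types are defined.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld2q_lane_s8_example {
    use super::*;

    #[test]
    fn overwrites_only_the_selected_lane() {
        let data: [i8; 2] = [41, 42];
        unsafe {
            let b = int8x16x2_t(vdupq_n_s8(0), vdupq_n_s8(0));
            let s = vld2q_lane_s8::<3>(data.as_ptr(), b);
            assert_eq!(vgetq_lane_s8::<3>(s.0), 41);
            assert_eq!(vgetq_lane_s8::<3>(s.1), 42);
            // Lane 0 keeps its original value.
            assert_eq!(vgetq_lane_s8::<0>(s.0), 0);
        }
    }
}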
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
11811#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
11812#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
11813#[doc = "## Safety"]
11814#[doc = "  * Neon instrinsic unsafe"]
11815#[inline]
11816#[target_feature(enable = "neon")]
11817#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11818#[cfg_attr(test, assert_instr(ld3r))]
11819pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
11820    unsafe extern "unadjusted" {
11821        #[cfg_attr(
11822            any(target_arch = "aarch64", target_arch = "arm64ec"),
11823            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
11824        )]
11825        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
11826    }
11827    _vld3_dup_f64(a as _)
11828}
11829#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
11830#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
11831#[doc = "## Safety"]
11832#[doc = "  * Neon instrinsic unsafe"]
11833#[inline]
11834#[target_feature(enable = "neon")]
11835#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11836#[cfg_attr(test, assert_instr(ld3r))]
11837pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
11838    unsafe extern "unadjusted" {
11839        #[cfg_attr(
11840            any(target_arch = "aarch64", target_arch = "arm64ec"),
11841            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
11842        )]
11843        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
11844    }
11845    _vld3q_dup_f64(a as _)
11846}
11847#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
11848#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
11849#[doc = "## Safety"]
11850#[doc = "  * Neon instrinsic unsafe"]
11851#[inline]
11852#[target_feature(enable = "neon")]
11853#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11854#[cfg_attr(test, assert_instr(ld3r))]
11855pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
11856    unsafe extern "unadjusted" {
11857        #[cfg_attr(
11858            any(target_arch = "aarch64", target_arch = "arm64ec"),
11859            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
11860        )]
11861        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
11862    }
11863    _vld3q_dup_s64(a as _)
11864}
11865#[doc = "Load multiple 3-element structures to three registers"]
11866#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
11867#[doc = "## Safety"]
11868#[doc = "  * Neon instrinsic unsafe"]
11869#[inline]
11870#[target_feature(enable = "neon")]
11871#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11872#[cfg_attr(test, assert_instr(nop))]
11873pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
11874    unsafe extern "unadjusted" {
11875        #[cfg_attr(
11876            any(target_arch = "aarch64", target_arch = "arm64ec"),
11877            link_name = "llvm.aarch64.neon.ld3.v1f64.p0"
11878        )]
11879        fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t;
11880    }
11881    _vld3_f64(a as _)
11882}
11883#[doc = "Load multiple 3-element structures to three registers"]
11884#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
11885#[doc = "## Safety"]
11886#[doc = "  * Neon instrinsic unsafe"]
11887#[inline]
11888#[target_feature(enable = "neon")]
11889#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
11890#[rustc_legacy_const_generics(2)]
11891#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11892pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
11893    static_assert!(LANE == 0);
11894    unsafe extern "unadjusted" {
11895        #[cfg_attr(
11896            any(target_arch = "aarch64", target_arch = "arm64ec"),
11897            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
11898        )]
11899        fn _vld3_lane_f64(
11900            a: float64x1_t,
11901            b: float64x1_t,
11902            c: float64x1_t,
11903            n: i64,
11904            ptr: *const i8,
11905        ) -> float64x1x3_t;
11906    }
11907    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
11908}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    static_assert!(LANE == 0);
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
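// Note: on big-endian targets the variants above reuse the s64
// implementation and then restore the expected lane order with a [1, 0]
// shuffle of each two-lane vector, so callers observe the same lane order on
// either endianness. The fix-up in isolation (a sketch using the same
// internal simd_shuffle! helper this file uses):
//
// ```
// unsafe fn swap_lanes_u64x2(v: uint64x2_t) -> uint64x2_t {
//     simd_shuffle!(v, v, [1, 0])
// }
// ```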
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
        )]
        fn _vld3q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            n: i64,
            ptr: *const i8,
        ) -> int8x16x3_t;
    }
    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
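// Example (an illustrative sketch): vld3q_s64 is a de-interleaving load, so
// an array [x0, y0, z0, x1, y1, z1] comes back with components separated:
// r.0 = [x0, x1], r.1 = [y0, y1], r.2 = [z0, z1].
//
// ```
// let data: [i64; 6] = [10, 20, 30, 11, 21, 31];
// let r: int64x2x3_t = unsafe { vld3q_s64(data.as_ptr()) };
// ```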
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
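// Example (an illustrative sketch): the lane index of the 16-lane u8
// variants is a const generic validated at compile time, so an out-of-range
// LANE is rejected before codegen.
//
// ```
// let bytes: [u8; 3] = [7, 8, 9];
// let dst = unsafe {
//     let zero = vdupq_n_u8(0);
//     // LANE must be in 0..=15; this replaces lane 15 of each register.
//     vld3q_lane_u8::<15>(bytes.as_ptr(), uint8x16x3_t(zero, zero, zero))
// };
// ```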
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v1f64.p0"
        )]
        fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    _vld4_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    static_assert!(LANE == 0);
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
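// Example (an illustrative sketch): the vld4 lane loads mirror the vld3
// forms with a fourth register, so one (x, y, z, w) quadruple updates the
// selected lane of each of the four vectors.
//
// ```
// let quad: [u64; 4] = [1, 2, 3, 4];
// let dst = unsafe {
//     let zero = vdup_n_u64(0);
//     vld4_lane_u64::<0>(quad.as_ptr(), uint64x1x4_t(zero, zero, zero, zero))
// };
// ```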
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    _vld4q_s64(a as _)
}
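// Example (an illustrative sketch): vld4q_f64 loads two interleaved
// (x, y, z, w) quadruples and separates the components:
// r.0 = [x0, x1], r.1 = [y0, y1], r.2 = [z0, z1], r.3 = [w0, w1].
//
// ```
// let data: [f64; 8] = [0.0, 1.0, 2.0, 3.0, 0.5, 1.5, 2.5, 3.5];
// let r: float64x2x4_t = unsafe { vld4q_f64(data.as_ptr()) };
// ```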
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
        )]
        fn _vld4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *const i8,
        ) -> int8x16x4_t;
    }
    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    static_assert_uimm_bits!(LANE, 1);
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    static_assert_uimm_bits!(LANE, 4);
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
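// Example (an illustrative sketch): a common use of the ld4 family is
// turning array-of-structs data, such as interleaved RGBA bytes, into one
// register per channel.
//
// ```
// let rgba: [u8; 64] = [0; 64]; // 16 interleaved RGBA pixels
// let channels: uint8x16x4_t = unsafe { vld4q_u8(rgba.as_ptr()) };
// // channels.0 holds the R bytes, channels.1 the G bytes, and so on.
// ```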
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const LANE: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const LANE: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti2q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const LANE: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2_lane_s16::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const LANE: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti2q_lane_s16::<LANE>(transmute(a), b))
}
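// Example (an illustrative sketch; exact table semantics are specified in
// Arm's FEAT_LUT documentation): the vluti2 intrinsics read packed 2-bit
// indices from the segment of the index vector selected by LANE and use
// them to look up table elements, producing a full 128-bit result.
//
// ```
// let out: int8x16_t = unsafe {
//     let table = vdup_n_s8(42);
//     let indices = vdup_n_u8(0); // every 2-bit index selects entry 0
//     vluti2_lane_s8::<0>(table, indices)
// };
// ```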
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
        )]
        fn _vluti4q_lane_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
}
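// Example (an illustrative sketch): the _x2 forms take a pair of table
// registers, so the 4-bit indices can address a 16-entry table of 16-bit
// elements spread across a.0 and a.1.
//
// ```
// let out: int16x8_t = unsafe {
//     let table = int16x8x2_t(vdupq_n_s16(1), vdupq_n_s16(2));
//     vluti4q_lane_s16_x2::<0>(table, vdup_n_u8(0))
// };
// ```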
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti4q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    static_assert!(LANE == 0);
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
    a: float16x8x2_t,
    b: uint8x16_t,
) -> float16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    static_assert!(LANE >= 0 && LANE <= 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    _vluti4q_laneq_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    static_assert!(LANE >= 0 && LANE <= 1);
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmax_f64(a, b) }
}
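// Editorial usage sketch (not emitted by stdarch-gen-arm), assuming the caller
// is already compiled with `#[target_feature(enable = "neon")]`. FMAX
// propagates NaN: a NaN in either input yields a NaN lane, unlike the
// "maximum number" variant `vmaxnm_f64` further below.
//
// ```
// let a = vdup_n_f64(1.0);
// let b = vdup_n_f64(f64::NAN);
// let r = vmax_f64(a, b); // lane 0 is NaN
// assert!(vget_lane_f64::<0>(r).is_nan());
// ```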
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v2f64"
        )]
        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmaxq_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.f16"
        )]
        fn _vmaxh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vmaxh_f16(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_fmax(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_fmax(a, b) }
}
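// Editorial usage sketch (same assumptions as above): FMAXNM implements the
// IEEE 754-2008 maxNum operation, so when exactly one input lane is a quiet
// NaN the numeric operand wins, whereas `vmaxq_f64` would return NaN.
//
// ```
// let a = vdupq_n_f64(2.5);
// let b = vdupq_n_f64(f64::NAN);
// let r = vmaxnmq_f64(a, b); // both lanes are 2.5
// assert_eq!(vgetq_lane_f64::<0>(r), 2.5);
// ```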
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    f16::max(a, b)
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    unsafe { simd_reduce_max(a) }
}
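// Editorial usage sketch for the across-vector reductions (assumed to run
// inside a `#[target_feature(enable = "neon")]` function; the input values are
// illustrative only):
//
// ```
// let data = [1.0f32, -2.0, 8.0, 3.0];
// let v = unsafe { vld1q_f32(data.as_ptr()) };
// assert_eq!(vmaxnmvq_f32(v), 8.0);
// ```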
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vmaxvq_f64(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
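// Editorial usage sketch for the integer horizontal maxima (same assumptions;
// values are illustrative):
//
// ```
// let data = [i32::MIN, -5, 7, 2];
// let v = unsafe { vld1q_s32(data.as_ptr()) };
// assert_eq!(vmaxvq_s32(v), 7);
// ```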
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminh_f16(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_fmin(a, b) }
}
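// Editorial contrast sketch (same assumptions as above): FMIN propagates NaN,
// while FMINNM, like FMAXNM, treats a single quiet NaN as missing data.
//
// ```
// let a = vdupq_n_f64(-1.5);
// let b = vdupq_n_f64(f64::NAN);
// assert!(vgetq_lane_f64::<0>(vminq_f64(a, b)).is_nan());
// assert_eq!(vgetq_lane_f64::<0>(vminnmq_f64(a, b)), -1.5);
// ```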
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    f16::min(a, b)
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminvq_f64(a) }
}
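// Editorial usage sketch for the floating-point horizontal minima (same
// assumptions; values are illustrative):
//
// ```
// let data = [3.0f64, -4.5];
// let v = unsafe { vld1q_f64(data.as_ptr()) };
// assert_eq!(vminvq_f64(v), -4.5);
// ```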
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
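// Editorial usage sketch (same assumptions as above). Note the
// `assert_instr(fmul)` markers: vmla/vmlaq lower to a separate multiply and
// add, so the product is rounded before the accumulation; for a single fused
// rounding step use the vfma family instead.
//
// ```
// let acc = vdupq_n_f64(1.0);
// let b = vdupq_n_f64(2.0);
// let c = vdupq_n_f64(3.0);
// let r = vmlaq_f64(acc, b, c); // each lane: 1.0 + 2.0 * 3.0 = 7.0
// ```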
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
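// Editorial sketch of the lane form (same assumptions as above): the upper
// four `i16` lanes of `b` are widened, multiplied by lane `LANE` of `c`, and
// added to `a`, i.e. `r[i] = a[i] + (b[i + 4] as i32) * (c[LANE] as i32)`.
//
// ```
// let acc = vdupq_n_s32(0);
// let b = vdupq_n_s16(5);
// let c = vdup_n_s16(3);
// let r = vmlal_high_lane_s16::<2>(acc, b, c); // LANE in 0..=3; each lane: 0 + 5 * 3 = 15
// ```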
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
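// Editorial note (same assumptions as above): the `_n_` forms simply broadcast
// the scalar and defer to the vector form, so
// `r[i] = a[i] + (b[i + 4] as i32) * (c as i32)`.
//
// ```
// let r = vmlal_high_n_s16(vdupq_n_s32(1), vdupq_n_s16(4), 25); // each lane: 1 + 4 * 25 = 101
// ```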
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
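// Editorial worked example (same assumptions as above): the multiply happens
// at the widened element width, so products that would overflow `i16` are
// fine here.
//
// ```
// let acc = vdupq_n_s32(10);
// let b = vdupq_n_s16(300);
// let c = vdupq_n_s16(400);
// let r = vmlal_high_s16(acc, b, c); // each lane: 10 + 300 * 400 = 120_010
// assert_eq!(vgetq_lane_s32::<0>(r), 120_010);
// ```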
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_u32(a, b, c)
    }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_s8(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_s16(a, b, c)
    }
}
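// Editorial usage sketch (same assumptions as above), mirroring the
// multiply-add example: the upper halves are widened, multiplied, and
// subtracted from the accumulator.
//
// ```
// let acc = vdupq_n_s32(1_000);
// let b = vdupq_n_s16(30);
// let c = vdupq_n_s16(20);
// let r = vmlsl_high_s16(acc, b, c); // each lane: 1_000 - 30 * 20 = 400
// ```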
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_u32(a, b, c)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    unsafe {
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_s8(a)
    }
}
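// Editorial usage sketch (same assumptions as above): SXTL2 sign-extends the
// upper half of the input, so negative bytes stay negative at the wider width.
//
// ```
// let v = vdupq_n_s8(-1);
// let w = vmovl_high_s8(v); // each i16 lane is -1, not 255
// assert_eq!(vgetq_lane_s16::<0>(w), -1);
// ```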
14389#[doc = "Vector move"]
14390#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
14391#[inline]
14392#[target_feature(enable = "neon")]
14393#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14394#[cfg_attr(test, assert_instr(sxtl2))]
14395pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
14396    unsafe {
14397        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14398        vmovl_s16(a)
14399    }
14400}
14401#[doc = "Vector move"]
14402#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
14403#[inline]
14404#[target_feature(enable = "neon")]
14405#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14406#[cfg_attr(test, assert_instr(sxtl2))]
14407pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
14408    unsafe {
14409        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
14410        vmovl_s32(a)
14411    }
14412}
14413#[doc = "Vector move"]
14414#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
14415#[inline]
14416#[target_feature(enable = "neon")]
14417#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14418#[cfg_attr(test, assert_instr(uxtl2))]
14419pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
14420    unsafe {
14421        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14422        vmovl_u8(a)
14423    }
14424}
14425#[doc = "Vector move"]
14426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
14427#[inline]
14428#[target_feature(enable = "neon")]
14429#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14430#[cfg_attr(test, assert_instr(uxtl2))]
14431pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
14432    unsafe {
14433        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14434        vmovl_u16(a)
14435    }
14436}
14437#[doc = "Vector move"]
14438#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
14439#[inline]
14440#[target_feature(enable = "neon")]
14441#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14442#[cfg_attr(test, assert_instr(uxtl2))]
14443pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
14444    unsafe {
14445        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
14446        vmovl_u32(a)
14447    }
14448}
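// Illustrative sketch (editor's addition): `vmovl_high_*` widens the top half
// of a 128-bit vector, e.g. lanes [8..16) of an `int8x16_t` become the eight
// sign-extended lanes of an `int16x8_t` (SXTL2; UXTL2 zero-extends instead).
// Hypothetical test-only helper.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmovl_high_s8_sketch() -> int16x8_t {
    let a = vdupq_n_s8(-1); // every i8 lane is -1
    // Sign extension preserves the value: every i16 lane is also -1.
    vmovl_high_s8(a)
}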
14449#[doc = "Extract narrow"]
14450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
14451#[inline]
14452#[target_feature(enable = "neon")]
14453#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14454#[cfg_attr(test, assert_instr(xtn2))]
14455pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
14456    unsafe {
14457        let c: int8x8_t = simd_cast(b);
14458        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14459    }
14460}
14461#[doc = "Extract narrow"]
14462#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
14463#[inline]
14464#[target_feature(enable = "neon")]
14465#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14466#[cfg_attr(test, assert_instr(xtn2))]
14467pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
14468    unsafe {
14469        let c: int16x4_t = simd_cast(b);
14470        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14471    }
14472}
14473#[doc = "Extract narrow"]
14474#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
14475#[inline]
14476#[target_feature(enable = "neon")]
14477#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14478#[cfg_attr(test, assert_instr(xtn2))]
14479pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
14480    unsafe {
14481        let c: int32x2_t = simd_cast(b);
14482        simd_shuffle!(a, c, [0, 1, 2, 3])
14483    }
14484}
14485#[doc = "Extract narrow"]
14486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
14487#[inline]
14488#[target_feature(enable = "neon")]
14489#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14490#[cfg_attr(test, assert_instr(xtn2))]
14491pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
14492    unsafe {
14493        let c: uint8x8_t = simd_cast(b);
14494        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
14495    }
14496}
14497#[doc = "Extract narrow"]
14498#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
14499#[inline]
14500#[target_feature(enable = "neon")]
14501#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14502#[cfg_attr(test, assert_instr(xtn2))]
14503pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
14504    unsafe {
14505        let c: uint16x4_t = simd_cast(b);
14506        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
14507    }
14508}
14509#[doc = "Extract narrow"]
14510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
14511#[inline]
14512#[target_feature(enable = "neon")]
14513#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14514#[cfg_attr(test, assert_instr(xtn2))]
14515pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
14516    unsafe {
14517        let c: uint32x2_t = simd_cast(b);
14518        simd_shuffle!(a, c, [0, 1, 2, 3])
14519    }
14520}
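// Illustrative sketch (editor's addition): `vmovn_high_*` truncates each lane
// of `b` and appends the result to the narrow vector `a`, so the output's low
// half is `a` and its high half is the narrowed `b` (XTN2). Truncation simply
// drops the upper bits: 0x1234 narrowed to u8 yields 0x34. Hypothetical
// test-only helper.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmovn_high_u16_sketch() -> uint8x16_t {
    let low = vdup_n_u8(0xFF); // becomes lanes [0..8) of the result
    let wide = vdupq_n_u16(0x1234); // narrows to 0x34 in lanes [8..16)
    vmovn_high_u16(low, wide)
}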
14521#[doc = "Multiply"]
14522#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
14523#[inline]
14524#[target_feature(enable = "neon")]
14525#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14526#[cfg_attr(test, assert_instr(fmul))]
14527pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
14528    unsafe { simd_mul(a, b) }
14529}
14530#[doc = "Multiply"]
14531#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
14532#[inline]
14533#[target_feature(enable = "neon")]
14534#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14535#[cfg_attr(test, assert_instr(fmul))]
14536pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
14537    unsafe { simd_mul(a, b) }
14538}
14539#[doc = "Floating-point multiply"]
14540#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
14541#[inline]
14542#[target_feature(enable = "neon")]
14543#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14544#[rustc_legacy_const_generics(2)]
14545#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14546pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
14547    static_assert!(LANE == 0);
14548    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
14549}
14550#[doc = "Floating-point multiply"]
14551#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
14552#[inline]
14553#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14554#[rustc_legacy_const_generics(2)]
14555#[target_feature(enable = "neon,fp16")]
14556#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14557pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
14558    static_assert_uimm_bits!(LANE, 3);
14559    unsafe {
14560        simd_mul(
14561            a,
14562            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14563        )
14564    }
14565}
14566#[doc = "Floating-point multiply"]
14567#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
14568#[inline]
14569#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14570#[rustc_legacy_const_generics(2)]
14571#[target_feature(enable = "neon,fp16")]
14572#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14573pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
14574    static_assert_uimm_bits!(LANE, 3);
14575    unsafe {
14576        simd_mul(
14577            a,
14578            simd_shuffle!(
14579                b,
14580                b,
14581                [
14582                    LANE as u32,
14583                    LANE as u32,
14584                    LANE as u32,
14585                    LANE as u32,
14586                    LANE as u32,
14587                    LANE as u32,
14588                    LANE as u32,
14589                    LANE as u32
14590                ]
14591            ),
14592        )
14593    }
14594}
14595#[doc = "Floating-point multiply"]
14596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
14597#[inline]
14598#[target_feature(enable = "neon")]
14599#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14600#[rustc_legacy_const_generics(2)]
14601#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14602pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
14603    static_assert_uimm_bits!(LANE, 1);
14604    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
14605}
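// Illustrative sketch (editor's addition): the `_lane`/`_laneq` multiplies
// broadcast one compile-time-selected lane of `b` before multiplying, so
// `vmul_laneq_f64::<1>(a, b)` scales `a` by lane 1 of the 128-bit `b`.
// Hypothetical test-only helper.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmul_laneq_f64_sketch(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // The lane index is validated at compile time by static_assert_uimm_bits!.
    vmul_laneq_f64::<1>(a, b)
}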
14606#[doc = "Vector multiply by scalar"]
14607#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
14608#[inline]
14609#[target_feature(enable = "neon")]
14610#[cfg_attr(test, assert_instr(fmul))]
14611#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14612pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
14613    unsafe { simd_mul(a, vdup_n_f64(b)) }
14614}
14615#[doc = "Vector multiply by scalar"]
14616#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
14617#[inline]
14618#[target_feature(enable = "neon")]
14619#[cfg_attr(test, assert_instr(fmul))]
14620#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14621pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
14622    unsafe { simd_mul(a, vdupq_n_f64(b)) }
14623}
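// Editor's note: the `_n` forms are sugar for a broadcast followed by a plain
// multiply; `vmulq_n_f64(a, s)` is equivalent to `vmulq_f64(a, vdupq_n_f64(s))`,
// which is exactly how the body above is generated.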
14624#[doc = "Floating-point multiply"]
14625#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
14626#[inline]
14627#[target_feature(enable = "neon")]
14628#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14629#[rustc_legacy_const_generics(2)]
14630#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14631pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
14632    static_assert!(LANE == 0);
14633    unsafe {
14634        let b: f64 = simd_extract!(b, LANE as u32);
14635        a * b
14636    }
14637}
14638#[doc = "Add"]
14639#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
14640#[inline]
14641#[target_feature(enable = "neon,fp16")]
14642#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14643#[cfg_attr(test, assert_instr(nop))]
14644pub fn vmulh_f16(a: f16, b: f16) -> f16 {
14645    a * b
14646}
14647#[doc = "Floating-point multiply"]
14648#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
14649#[inline]
14650#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14651#[rustc_legacy_const_generics(2)]
14652#[target_feature(enable = "neon,fp16")]
14653#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14654pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
14655    static_assert_uimm_bits!(LANE, 2);
14656    unsafe {
14657        let b: f16 = simd_extract!(b, LANE as u32);
14658        a * b
14659    }
14660}
14661#[doc = "Floating-point multiply"]
14662#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
14663#[inline]
14664#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
14665#[rustc_legacy_const_generics(2)]
14666#[target_feature(enable = "neon,fp16")]
14667#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
14668pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
14669    static_assert_uimm_bits!(LANE, 3);
14670    unsafe {
14671        let b: f16 = simd_extract!(b, LANE as u32);
14672        a * b
14673    }
14674}
14675#[doc = "Multiply long"]
14676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
14677#[inline]
14678#[target_feature(enable = "neon")]
14679#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
14680#[rustc_legacy_const_generics(2)]
14681#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14682pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
14683    static_assert_uimm_bits!(LANE, 2);
14684    unsafe {
14685        vmull_high_s16(
14686            a,
14687            simd_shuffle!(
14688                b,
14689                b,
14690                [
14691                    LANE as u32,
14692                    LANE as u32,
14693                    LANE as u32,
14694                    LANE as u32,
14695                    LANE as u32,
14696                    LANE as u32,
14697                    LANE as u32,
14698                    LANE as u32
14699                ]
14700            ),
14701        )
14702    }
14703}
14704#[doc = "Multiply long"]
14705#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
14706#[inline]
14707#[target_feature(enable = "neon")]
14708#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
14709#[rustc_legacy_const_generics(2)]
14710#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14711pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
14712    static_assert_uimm_bits!(LANE, 3);
14713    unsafe {
14714        vmull_high_s16(
14715            a,
14716            simd_shuffle!(
14717                b,
14718                b,
14719                [
14720                    LANE as u32,
14721                    LANE as u32,
14722                    LANE as u32,
14723                    LANE as u32,
14724                    LANE as u32,
14725                    LANE as u32,
14726                    LANE as u32,
14727                    LANE as u32
14728                ]
14729            ),
14730        )
14731    }
14732}
14733#[doc = "Multiply long"]
14734#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
14735#[inline]
14736#[target_feature(enable = "neon")]
14737#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
14738#[rustc_legacy_const_generics(2)]
14739#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14740pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
14741    static_assert_uimm_bits!(LANE, 1);
14742    unsafe {
14743        vmull_high_s32(
14744            a,
14745            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14746        )
14747    }
14748}
14749#[doc = "Multiply long"]
14750#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
14751#[inline]
14752#[target_feature(enable = "neon")]
14753#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
14754#[rustc_legacy_const_generics(2)]
14755#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14756pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
14757    static_assert_uimm_bits!(LANE, 2);
14758    unsafe {
14759        vmull_high_s32(
14760            a,
14761            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14762        )
14763    }
14764}
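// Illustrative sketch (editor's addition): the `_high_lane` long multiplies
// combine two tricks: lane LANE of `b` is broadcast, and only the upper half
// of `a` enters the widening multiply, so each i64 result lane is
// `a_hi[i] as i64 * b[LANE] as i64`. Hypothetical test-only helper.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmull_high_lane_s32_sketch(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // Lane index 1 satisfies static_assert_uimm_bits!(LANE, 1).
    vmull_high_lane_s32::<1>(a, b)
}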
14765#[doc = "Multiply long"]
14766#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
14767#[inline]
14768#[target_feature(enable = "neon")]
14769#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
14770#[rustc_legacy_const_generics(2)]
14771#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14772pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
14773    static_assert_uimm_bits!(LANE, 2);
14774    unsafe {
14775        vmull_high_u16(
14776            a,
14777            simd_shuffle!(
14778                b,
14779                b,
14780                [
14781                    LANE as u32,
14782                    LANE as u32,
14783                    LANE as u32,
14784                    LANE as u32,
14785                    LANE as u32,
14786                    LANE as u32,
14787                    LANE as u32,
14788                    LANE as u32
14789                ]
14790            ),
14791        )
14792    }
14793}
14794#[doc = "Multiply long"]
14795#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
14796#[inline]
14797#[target_feature(enable = "neon")]
14798#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
14799#[rustc_legacy_const_generics(2)]
14800#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14801pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
14802    static_assert_uimm_bits!(LANE, 3);
14803    unsafe {
14804        vmull_high_u16(
14805            a,
14806            simd_shuffle!(
14807                b,
14808                b,
14809                [
14810                    LANE as u32,
14811                    LANE as u32,
14812                    LANE as u32,
14813                    LANE as u32,
14814                    LANE as u32,
14815                    LANE as u32,
14816                    LANE as u32,
14817                    LANE as u32
14818                ]
14819            ),
14820        )
14821    }
14822}
14823#[doc = "Multiply long"]
14824#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
14825#[inline]
14826#[target_feature(enable = "neon")]
14827#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
14828#[rustc_legacy_const_generics(2)]
14829#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14830pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
14831    static_assert_uimm_bits!(LANE, 1);
14832    unsafe {
14833        vmull_high_u32(
14834            a,
14835            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14836        )
14837    }
14838}
14839#[doc = "Multiply long"]
14840#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
14841#[inline]
14842#[target_feature(enable = "neon")]
14843#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
14844#[rustc_legacy_const_generics(2)]
14845#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14846pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
14847    static_assert_uimm_bits!(LANE, 2);
14848    unsafe {
14849        vmull_high_u32(
14850            a,
14851            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
14852        )
14853    }
14854}
14855#[doc = "Multiply long"]
14856#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
14857#[inline]
14858#[target_feature(enable = "neon")]
14859#[cfg_attr(test, assert_instr(smull2))]
14860#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14861pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
14862    vmull_high_s16(a, vdupq_n_s16(b))
14863}
14864#[doc = "Multiply long"]
14865#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
14866#[inline]
14867#[target_feature(enable = "neon")]
14868#[cfg_attr(test, assert_instr(smull2))]
14869#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14870pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
14871    vmull_high_s32(a, vdupq_n_s32(b))
14872}
14873#[doc = "Multiply long"]
14874#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
14875#[inline]
14876#[target_feature(enable = "neon")]
14877#[cfg_attr(test, assert_instr(umull2))]
14878#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14879pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
14880    vmull_high_u16(a, vdupq_n_u16(b))
14881}
14882#[doc = "Multiply long"]
14883#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
14884#[inline]
14885#[target_feature(enable = "neon")]
14886#[cfg_attr(test, assert_instr(umull2))]
14887#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14888pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
14889    vmull_high_u32(a, vdupq_n_u32(b))
14890}
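// Editor's note: as with the other `_n` forms, `vmull_high_n_*` simply
// broadcasts the scalar and defers to the vector-by-vector `vmull_high_*`;
// e.g. every lane of `vmull_high_n_u32(a, 5)` is `a_hi[i] as u64 * 5`.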
14891#[doc = "Polynomial multiply long"]
14892#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
14893#[inline]
14894#[target_feature(enable = "neon,aes")]
14895#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14896#[cfg_attr(test, assert_instr(pmull2))]
14897pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
14898    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
14899}
14900#[doc = "Polynomial multiply long"]
14901#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
14902#[inline]
14903#[target_feature(enable = "neon")]
14904#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14905#[cfg_attr(test, assert_instr(pmull2))]
14906pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
14907    unsafe {
14908        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14909        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14910        vmull_p8(a, b)
14911    }
14912}
14913#[doc = "Signed multiply long"]
14914#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
14915#[inline]
14916#[target_feature(enable = "neon")]
14917#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14918#[cfg_attr(test, assert_instr(smull2))]
14919pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
14920    unsafe {
14921        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14922        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14923        vmull_s8(a, b)
14924    }
14925}
14926#[doc = "Signed multiply long"]
14927#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
14928#[inline]
14929#[target_feature(enable = "neon")]
14930#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14931#[cfg_attr(test, assert_instr(smull2))]
14932pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
14933    unsafe {
14934        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14935        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14936        vmull_s16(a, b)
14937    }
14938}
14939#[doc = "Signed multiply long"]
14940#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
14941#[inline]
14942#[target_feature(enable = "neon")]
14943#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14944#[cfg_attr(test, assert_instr(smull2))]
14945pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
14946    unsafe {
14947        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
14948        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
14949        vmull_s32(a, b)
14950    }
14951}
14952#[doc = "Unsigned multiply long"]
14953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
14954#[inline]
14955#[target_feature(enable = "neon")]
14956#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14957#[cfg_attr(test, assert_instr(umull2))]
14958pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
14959    unsafe {
14960        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
14961        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
14962        vmull_u8(a, b)
14963    }
14964}
14965#[doc = "Unsigned multiply long"]
14966#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
14967#[inline]
14968#[target_feature(enable = "neon")]
14969#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14970#[cfg_attr(test, assert_instr(umull2))]
14971pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
14972    unsafe {
14973        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
14974        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
14975        vmull_u16(a, b)
14976    }
14977}
14978#[doc = "Unsigned multiply long"]
14979#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
14980#[inline]
14981#[target_feature(enable = "neon")]
14982#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14983#[cfg_attr(test, assert_instr(umull2))]
14984pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
14985    unsafe {
14986        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
14987        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
14988        vmull_u32(a, b)
14989    }
14990}
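// Illustrative sketch (editor's addition): `vmull_high_*` multiplies the upper
// halves of both inputs and widens the result, so no lane can overflow: an
// i16 * i16 product always fits in the i32 result lanes. Hypothetical
// test-only helper.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmull_high_s16_sketch() -> int32x4_t {
    let a = vdupq_n_s16(i16::MAX); // 32767 in every lane
    let b = vdupq_n_s16(i16::MAX);
    // Each i32 lane holds 32767 * 32767 = 1_073_676_289, exactly representable.
    vmull_high_s16(a, b)
}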
14991#[doc = "Polynomial multiply long"]
14992#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
14993#[inline]
14994#[target_feature(enable = "neon,aes")]
14995#[stable(feature = "neon_intrinsics", since = "1.59.0")]
14996#[cfg_attr(test, assert_instr(pmull))]
14997pub fn vmull_p64(a: p64, b: p64) -> p128 {
14998    unsafe extern "unadjusted" {
14999        #[cfg_attr(
15000            any(target_arch = "aarch64", target_arch = "arm64ec"),
15001            link_name = "llvm.aarch64.neon.pmull64"
15002        )]
15003        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
15004    }
15005    unsafe { transmute(_vmull_p64(a, b)) }
15006}
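// Editor's note: `vmull_p64`/`vmull_high_p64` perform a carry-less
// (polynomial, GF(2)) multiply of two 64-bit operands into a 128-bit product;
// the PMULL family underpins GHASH (AES-GCM) and CRC folding. Illustrative,
// hypothetical test-only helper: in GF(2) arithmetic the cross terms combine
// with XOR rather than addition, so 0b11 * 0b11 = 0b101.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,aes")]
fn vmull_p64_sketch() -> p128 {
    // 3 * 3 = 5 carry-lessly: (x + 1)(x + 1) = x^2 + 1 over GF(2).
    vmull_p64(3, 3)
}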
15007#[doc = "Floating-point multiply"]
15008#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
15009#[inline]
15010#[target_feature(enable = "neon")]
15011#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15012#[rustc_legacy_const_generics(2)]
15013#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15014pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
15015    static_assert!(LANE == 0);
15016    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15017}
15018#[doc = "Floating-point multiply"]
15019#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
15020#[inline]
15021#[target_feature(enable = "neon")]
15022#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15023#[rustc_legacy_const_generics(2)]
15024#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15025pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15026    static_assert_uimm_bits!(LANE, 1);
15027    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15028}
15029#[doc = "Floating-point multiply"]
15030#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
15031#[inline]
15032#[target_feature(enable = "neon")]
15033#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15034#[rustc_legacy_const_generics(2)]
15035#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15036pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
15037    static_assert_uimm_bits!(LANE, 1);
15038    unsafe {
15039        let b: f32 = simd_extract!(b, LANE as u32);
15040        a * b
15041    }
15042}
15043#[doc = "Floating-point multiply"]
15044#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
15045#[inline]
15046#[target_feature(enable = "neon")]
15047#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15048#[rustc_legacy_const_generics(2)]
15049#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15050pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
15051    static_assert_uimm_bits!(LANE, 2);
15052    unsafe {
15053        let b: f32 = simd_extract!(b, LANE as u32);
15054        a * b
15055    }
15056}
15057#[doc = "Floating-point multiply"]
15058#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
15059#[inline]
15060#[target_feature(enable = "neon")]
15061#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
15062#[rustc_legacy_const_generics(2)]
15063#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15064pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
15065    static_assert_uimm_bits!(LANE, 1);
15066    unsafe {
15067        let b: f64 = simd_extract!(b, LANE as u32);
15068        a * b
15069    }
15070}
15071#[doc = "Floating-point multiply extended"]
15072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
15073#[inline]
15074#[target_feature(enable = "neon,fp16")]
15075#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15076#[cfg_attr(test, assert_instr(fmulx))]
15077pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15078    unsafe extern "unadjusted" {
15079        #[cfg_attr(
15080            any(target_arch = "aarch64", target_arch = "arm64ec"),
15081            link_name = "llvm.aarch64.neon.fmulx.v4f16"
15082        )]
15083        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
15084    }
15085    unsafe { _vmulx_f16(a, b) }
15086}
15087#[doc = "Floating-point multiply extended"]
15088#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
15089#[inline]
15090#[target_feature(enable = "neon,fp16")]
15091#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15092#[cfg_attr(test, assert_instr(fmulx))]
15093pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15094    unsafe extern "unadjusted" {
15095        #[cfg_attr(
15096            any(target_arch = "aarch64", target_arch = "arm64ec"),
15097            link_name = "llvm.aarch64.neon.fmulx.v8f16"
15098        )]
15099        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15100    }
15101    unsafe { _vmulxq_f16(a, b) }
15102}
15103#[doc = "Floating-point multiply extended"]
15104#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
15105#[inline]
15106#[target_feature(enable = "neon")]
15107#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15108#[cfg_attr(test, assert_instr(fmulx))]
15109pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
15110    unsafe extern "unadjusted" {
15111        #[cfg_attr(
15112            any(target_arch = "aarch64", target_arch = "arm64ec"),
15113            link_name = "llvm.aarch64.neon.fmulx.v2f32"
15114        )]
15115        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
15116    }
15117    unsafe { _vmulx_f32(a, b) }
15118}
15119#[doc = "Floating-point multiply extended"]
15120#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
15121#[inline]
15122#[target_feature(enable = "neon")]
15123#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15124#[cfg_attr(test, assert_instr(fmulx))]
15125pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15126    unsafe extern "unadjusted" {
15127        #[cfg_attr(
15128            any(target_arch = "aarch64", target_arch = "arm64ec"),
15129            link_name = "llvm.aarch64.neon.fmulx.v4f32"
15130        )]
15131        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
15132    }
15133    unsafe { _vmulxq_f32(a, b) }
15134}
15135#[doc = "Floating-point multiply extended"]
15136#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
15137#[inline]
15138#[target_feature(enable = "neon")]
15139#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15140#[cfg_attr(test, assert_instr(fmulx))]
15141pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
15142    unsafe extern "unadjusted" {
15143        #[cfg_attr(
15144            any(target_arch = "aarch64", target_arch = "arm64ec"),
15145            link_name = "llvm.aarch64.neon.fmulx.v1f64"
15146        )]
15147        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
15148    }
15149    unsafe { _vmulx_f64(a, b) }
15150}
15151#[doc = "Floating-point multiply extended"]
15152#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
15153#[inline]
15154#[target_feature(enable = "neon")]
15155#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15156#[cfg_attr(test, assert_instr(fmulx))]
15157pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15158    unsafe extern "unadjusted" {
15159        #[cfg_attr(
15160            any(target_arch = "aarch64", target_arch = "arm64ec"),
15161            link_name = "llvm.aarch64.neon.fmulx.v2f64"
15162        )]
15163        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
15164    }
15165    unsafe { _vmulxq_f64(a, b) }
15166}
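// Editor's note: FMULX differs from FMUL only in its special cases: where an
// IEEE multiply of zero by infinity would produce NaN, FMULX returns 2.0 with
// the XOR of the operand signs. This makes it useful alongside the
// reciprocal(-square-root) estimate instructions, where 0 * inf can arise from
// well-defined inputs. Illustrative, hypothetical test-only helper.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vmulxs_f32_sketch() -> f32 {
    // Returns 2.0 rather than NaN.
    vmulxs_f32(0.0, f32::INFINITY)
}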
15167#[doc = "Floating-point multiply extended"]
15168#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
15169#[inline]
15170#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15171#[rustc_legacy_const_generics(2)]
15172#[target_feature(enable = "neon,fp16")]
15173#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15174pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
15175    static_assert_uimm_bits!(LANE, 2);
15176    unsafe {
15177        vmulx_f16(
15178            a,
15179            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15180        )
15181    }
15182}
15183#[doc = "Floating-point multiply extended"]
15184#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
15185#[inline]
15186#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15187#[rustc_legacy_const_generics(2)]
15188#[target_feature(enable = "neon,fp16")]
15189#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15190pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
15191    static_assert_uimm_bits!(LANE, 3);
15192    unsafe {
15193        vmulx_f16(
15194            a,
15195            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15196        )
15197    }
15198}
15199#[doc = "Floating-point multiply extended"]
15200#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
15201#[inline]
15202#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15203#[rustc_legacy_const_generics(2)]
15204#[target_feature(enable = "neon,fp16")]
15205#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15206pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
15207    static_assert_uimm_bits!(LANE, 2);
15208    unsafe {
15209        vmulxq_f16(
15210            a,
15211            simd_shuffle!(
15212                b,
15213                b,
15214                [
15215                    LANE as u32,
15216                    LANE as u32,
15217                    LANE as u32,
15218                    LANE as u32,
15219                    LANE as u32,
15220                    LANE as u32,
15221                    LANE as u32,
15222                    LANE as u32
15223                ]
15224            ),
15225        )
15226    }
15227}
15228#[doc = "Floating-point multiply extended"]
15229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
15230#[inline]
15231#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15232#[rustc_legacy_const_generics(2)]
15233#[target_feature(enable = "neon,fp16")]
15234#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15235pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15236    static_assert_uimm_bits!(LANE, 3);
15237    unsafe {
15238        vmulxq_f16(
15239            a,
15240            simd_shuffle!(
15241                b,
15242                b,
15243                [
15244                    LANE as u32,
15245                    LANE as u32,
15246                    LANE as u32,
15247                    LANE as u32,
15248                    LANE as u32,
15249                    LANE as u32,
15250                    LANE as u32,
15251                    LANE as u32
15252                ]
15253            ),
15254        )
15255    }
15256}
15257#[doc = "Floating-point multiply extended"]
15258#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
15259#[inline]
15260#[target_feature(enable = "neon")]
15261#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15262#[rustc_legacy_const_generics(2)]
15263#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15264pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
15265    static_assert_uimm_bits!(LANE, 1);
15266    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15267}
15268#[doc = "Floating-point multiply extended"]
15269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
15270#[inline]
15271#[target_feature(enable = "neon")]
15272#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15273#[rustc_legacy_const_generics(2)]
15274#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15275pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
15276    static_assert_uimm_bits!(LANE, 2);
15277    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15278}
15279#[doc = "Floating-point multiply extended"]
15280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
15281#[inline]
15282#[target_feature(enable = "neon")]
15283#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15284#[rustc_legacy_const_generics(2)]
15285#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15286pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
15287    static_assert_uimm_bits!(LANE, 1);
15288    unsafe {
15289        vmulxq_f32(
15290            a,
15291            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15292        )
15293    }
15294}
15295#[doc = "Floating-point multiply extended"]
15296#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
15297#[inline]
15298#[target_feature(enable = "neon")]
15299#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15300#[rustc_legacy_const_generics(2)]
15301#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15302pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15303    static_assert_uimm_bits!(LANE, 2);
15304    unsafe {
15305        vmulxq_f32(
15306            a,
15307            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
15308        )
15309    }
15310}
15311#[doc = "Floating-point multiply extended"]
15312#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
15313#[inline]
15314#[target_feature(enable = "neon")]
15315#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15316#[rustc_legacy_const_generics(2)]
15317#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15318pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15319    static_assert_uimm_bits!(LANE, 1);
15320    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15321}
15322#[doc = "Floating-point multiply extended"]
15323#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
15324#[inline]
15325#[target_feature(enable = "neon")]
15326#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15327#[rustc_legacy_const_generics(2)]
15328#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15329pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
15330    static_assert!(LANE == 0);
15331    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
15332}
15333#[doc = "Floating-point multiply extended"]
15334#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
15335#[inline]
15336#[target_feature(enable = "neon")]
15337#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15338#[rustc_legacy_const_generics(2)]
15339#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15340pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
15341    static_assert_uimm_bits!(LANE, 1);
15342    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
15343}
15344#[doc = "Vector multiply by scalar"]
15345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
15346#[inline]
15347#[cfg_attr(test, assert_instr(fmulx))]
15348#[target_feature(enable = "neon,fp16")]
15349#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15350pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
15351    vmulx_f16(a, vdup_n_f16(b))
15352}
15353#[doc = "Vector multiply by scalar"]
15354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
15355#[inline]
15356#[cfg_attr(test, assert_instr(fmulx))]
15357#[target_feature(enable = "neon,fp16")]
15358#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15359pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
15360    vmulxq_f16(a, vdupq_n_f16(b))
15361}
15362#[doc = "Floating-point multiply extended"]
15363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
15364#[inline]
15365#[target_feature(enable = "neon")]
15366#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15367#[cfg_attr(test, assert_instr(fmulx))]
15368pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
15369    unsafe extern "unadjusted" {
15370        #[cfg_attr(
15371            any(target_arch = "aarch64", target_arch = "arm64ec"),
15372            link_name = "llvm.aarch64.neon.fmulx.f64"
15373        )]
15374        fn _vmulxd_f64(a: f64, b: f64) -> f64;
15375    }
15376    unsafe { _vmulxd_f64(a, b) }
15377}
15378#[doc = "Floating-point multiply extended"]
15379#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
15380#[inline]
15381#[target_feature(enable = "neon")]
15382#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15383#[cfg_attr(test, assert_instr(fmulx))]
15384pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
15385    unsafe extern "unadjusted" {
15386        #[cfg_attr(
15387            any(target_arch = "aarch64", target_arch = "arm64ec"),
15388            link_name = "llvm.aarch64.neon.fmulx.f32"
15389        )]
15390        fn _vmulxs_f32(a: f32, b: f32) -> f32;
15391    }
15392    unsafe { _vmulxs_f32(a, b) }
15393}
15394#[doc = "Floating-point multiply extended"]
15395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
15396#[inline]
15397#[target_feature(enable = "neon")]
15398#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15399#[rustc_legacy_const_generics(2)]
15400#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15401pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
15402    static_assert!(LANE == 0);
15403    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
15404}
15405#[doc = "Floating-point multiply extended"]
15406#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
15407#[inline]
15408#[target_feature(enable = "neon")]
15409#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15410#[rustc_legacy_const_generics(2)]
15411#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15412pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
15413    static_assert_uimm_bits!(LANE, 1);
15414    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
15415}
15416#[doc = "Floating-point multiply extended"]
15417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
15418#[inline]
15419#[target_feature(enable = "neon")]
15420#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15421#[rustc_legacy_const_generics(2)]
15422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15423pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
15424    static_assert_uimm_bits!(LANE, 1);
15425    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
15426}
15427#[doc = "Floating-point multiply extended"]
15428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
15429#[inline]
15430#[target_feature(enable = "neon")]
15431#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15432#[rustc_legacy_const_generics(2)]
15433#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15434pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
15435    static_assert_uimm_bits!(LANE, 2);
15436    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
15437}
15438#[doc = "Floating-point multiply extended"]
15439#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
15440#[inline]
15441#[target_feature(enable = "neon,fp16")]
15442#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15443#[cfg_attr(test, assert_instr(fmulx))]
15444pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
15445    unsafe extern "unadjusted" {
15446        #[cfg_attr(
15447            any(target_arch = "aarch64", target_arch = "arm64ec"),
15448            link_name = "llvm.aarch64.neon.fmulx.f16"
15449        )]
15450        fn _vmulxh_f16(a: f16, b: f16) -> f16;
15451    }
15452    unsafe { _vmulxh_f16(a, b) }
15453}
15454#[doc = "Floating-point multiply extended"]
15455#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
15456#[inline]
15457#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15458#[rustc_legacy_const_generics(2)]
15459#[target_feature(enable = "neon,fp16")]
15460#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15461pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
15462    static_assert_uimm_bits!(LANE, 2);
15463    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
15464}
15465#[doc = "Floating-point multiply extended"]
15466#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
15467#[inline]
15468#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15469#[rustc_legacy_const_generics(2)]
15470#[target_feature(enable = "neon,fp16")]
15471#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15472pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
15473    static_assert_uimm_bits!(LANE, 3);
15474    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
15475}
15476#[doc = "Floating-point multiply extended"]
15477#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
15478#[inline]
15479#[target_feature(enable = "neon")]
15480#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
15481#[rustc_legacy_const_generics(2)]
15482#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15483pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
15484    static_assert!(LANE == 0);
15485    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
15486}
15487#[doc = "Negate"]
15488#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
15489#[inline]
15490#[target_feature(enable = "neon")]
15491#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15492#[cfg_attr(test, assert_instr(fneg))]
15493pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
15494    unsafe { simd_neg(a) }
15495}
15496#[doc = "Negate"]
15497#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
15498#[inline]
15499#[target_feature(enable = "neon")]
15500#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15501#[cfg_attr(test, assert_instr(fneg))]
15502pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
15503    unsafe { simd_neg(a) }
15504}
15505#[doc = "Negate"]
15506#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
15507#[inline]
15508#[target_feature(enable = "neon")]
15509#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15510#[cfg_attr(test, assert_instr(neg))]
15511pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
15512    unsafe { simd_neg(a) }
15513}
15514#[doc = "Negate"]
15515#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
15516#[inline]
15517#[target_feature(enable = "neon")]
15518#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15519#[cfg_attr(test, assert_instr(neg))]
15520pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
15521    unsafe { simd_neg(a) }
15522}
15523#[doc = "Negate"]
15524#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
15525#[inline]
15526#[target_feature(enable = "neon")]
15527#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15528#[cfg_attr(test, assert_instr(neg))]
15529pub fn vnegd_s64(a: i64) -> i64 {
15530    a.wrapping_neg()
15531}
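// Editor's note: `vnegd_s64` uses `wrapping_neg`, matching the NEG
// instruction: negating `i64::MIN` wraps back to `i64::MIN` instead of
// overflowing the way a plain `-a` would in a debug build.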
15532#[doc = "Negate"]
15533#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
15534#[inline]
15535#[target_feature(enable = "neon,fp16")]
15536#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15537#[cfg_attr(test, assert_instr(fneg))]
15538pub fn vnegh_f16(a: f16) -> f16 {
15539    -a
15540}
15541#[doc = "Floating-point add pairwise"]
15542#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
15543#[inline]
15544#[target_feature(enable = "neon")]
15545#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15546#[cfg_attr(test, assert_instr(nop))]
15547pub fn vpaddd_f64(a: float64x2_t) -> f64 {
15548    unsafe {
15549        let a1: f64 = simd_extract!(a, 0);
15550        let a2: f64 = simd_extract!(a, 1);
15551        a1 + a2
15552    }
15553}
15554#[doc = "Floating-point add pairwise"]
15555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
15556#[inline]
15557#[target_feature(enable = "neon")]
15558#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15559#[cfg_attr(test, assert_instr(nop))]
15560pub fn vpadds_f32(a: float32x2_t) -> f32 {
15561    unsafe {
15562        let a1: f32 = simd_extract!(a, 0);
15563        let a2: f32 = simd_extract!(a, 1);
15564        a1 + a2
15565    }
15566}
15567#[doc = "Add pairwise"]
15568#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
15569#[inline]
15570#[target_feature(enable = "neon")]
15571#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15572#[cfg_attr(test, assert_instr(addp))]
15573pub fn vpaddd_s64(a: int64x2_t) -> i64 {
15574    unsafe { simd_reduce_add_unordered(a) }
15575}
15576#[doc = "Add pairwise"]
15577#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
15578#[inline]
15579#[target_feature(enable = "neon")]
15580#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15581#[cfg_attr(test, assert_instr(addp))]
15582pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
15583    unsafe { simd_reduce_add_unordered(a) }
15584}
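// Illustrative sketch (editor's addition): the scalar pairwise adds reduce a
// two-lane vector to a single value, e.g. `vpaddd_s64([x, y]) == x + y` (with
// integer wrap-around on overflow). Hypothetical test-only helper.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn vpaddd_s64_sketch() -> i64 {
    let v = vdupq_n_s64(21); // [21, 21]
    // 21 + 21 = 42
    vpaddd_s64(v)
}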
15585#[doc = "Floating-point add pairwise"]
15586#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
15587#[inline]
15588#[target_feature(enable = "neon,fp16")]
15589#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
15590#[cfg_attr(test, assert_instr(faddp))]
15591pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
15592    unsafe extern "unadjusted" {
15593        #[cfg_attr(
15594            any(target_arch = "aarch64", target_arch = "arm64ec"),
15595            link_name = "llvm.aarch64.neon.faddp.v8f16"
15596        )]
15597        fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
15598    }
15599    unsafe { _vpaddq_f16(a, b) }
15600}
15601#[doc = "Floating-point add pairwise"]
15602#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
15603#[inline]
15604#[target_feature(enable = "neon")]
15605#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15606#[cfg_attr(test, assert_instr(faddp))]
15607pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
15608    unsafe extern "unadjusted" {
15609        #[cfg_attr(
15610            any(target_arch = "aarch64", target_arch = "arm64ec"),
15611            link_name = "llvm.aarch64.neon.faddp.v4f32"
15612        )]
15613        fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
15614    }
15615    unsafe { _vpaddq_f32(a, b) }
15616}
15617#[doc = "Floating-point add pairwise"]
15618#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
15619#[inline]
15620#[target_feature(enable = "neon")]
15621#[stable(feature = "neon_intrinsics", since = "1.59.0")]
15622#[cfg_attr(test, assert_instr(faddp))]
15623pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
15624    unsafe extern "unadjusted" {
15625        #[cfg_attr(
15626            any(target_arch = "aarch64", target_arch = "arm64ec"),
15627            link_name = "llvm.aarch64.neon.faddp.v2f64"
15628        )]
15629        fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
15630    }
15631    unsafe { _vpaddq_f64(a, b) }
15632}
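// Editor's note: the 128-bit pairwise adds consume two vectors: the result's
// low half holds the sums of adjacent pairs of `a` and the high half those of
// `b`, i.e. `vpaddq_f64([a0, a1], [b0, b1]) == [a0 + a1, b0 + b1]`.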
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v16i8"
        )]
        fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpaddq_s8(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v8i16"
        )]
        fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpaddq_s16(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v4i32"
        )]
        fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpaddq_s32(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v2i64"
        )]
        fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
    }
    unsafe { _vpaddq_s64(a, b) }
}
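// The same ADDP pattern for integer lanes; a minimal sketch with vpaddq_s16
// (hypothetical helper name, assumes NEON):
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn pairwise_add_s16_sketch() {
    let lo: [i16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let hi: [i16; 8] = [-1, 1, -2, 2, -3, 3, -4, 4];
    let a = unsafe { vld1q_s16(lo.as_ptr()) };
    let b = unsafe { vld1q_s16(hi.as_ptr()) };
    let r = vpaddq_s16(a, b);
    let mut out = [0i16; 8];
    unsafe { vst1q_s16(out.as_mut_ptr(), r) };
    // Pairwise sums of `a`, then pairwise sums of `b`.
    assert_eq!(out, [3, 7, 11, 15, 0, 0, 0, 0]);
}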
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
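// Why the unsigned vpaddq_* variants can reuse the signed intrinsic: two's-
// complement addition produces identical bits for signed and unsigned lanes,
// so reinterpreting through `transmute` is sound, and the big-endian shuffles
// above only restore the lane order that the reinterpretation assumes. A
// minimal sketch (hypothetical helper name, assumes NEON):
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn pairwise_add_u8_sketch() {
    let bytes: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    let a = unsafe { vld1q_u8(bytes.as_ptr()) };
    // With both operands equal, the low and high halves of the result both
    // hold the pairwise sums of `a`.
    let r = vpaddq_u8(a, a);
    let mut out = [0u8; 16];
    unsafe { vst1q_u8(out.as_mut_ptr(), r) };
    assert_eq!(
        out,
        [1, 5, 9, 13, 17, 21, 25, 29, 1, 5, 9, 13, 17, 21, 25, 29]
    );
}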
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
        )]
        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmax_f16(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
        )]
        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpmaxq_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
        )]
        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmaxnm_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
        )]
        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpmaxnmq_f16(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxnms_f32(a) }
}
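// FMAXNMP and FMAXP differ only in NaN handling: the "number" (NM) forms
// follow IEEE 754 maxNum and prefer a number over a quiet NaN, while the
// plain forms propagate the NaN. A minimal sketch (hypothetical helper name,
// assumes NEON):
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn pairwise_maxnm_nan_sketch() {
    let data: [f32; 2] = [f32::NAN, 3.0];
    let v = unsafe { vld1_f32(data.as_ptr()) };
    // The NM reduction ignores the quiet NaN lane.
    assert_eq!(vpmaxnms_f32(v), 3.0);
    // The plain pairwise maximum propagates it.
    assert!(vpmaxs_f32(v).is_nan());
}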
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxq_f64(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxs_f32(a) }
}
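// A minimal sketch of the folding (pairwise) maximum: lanes are compared in
// adjacent pairs, with `a`'s results filling the low half of the output and
// `b`'s the high half. Hypothetical helper name, assumes NEON.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn pairwise_max_s16_sketch() {
    let lo: [i16; 8] = [1, 9, -4, -8, 5, 2, 0, 7];
    let hi: [i16; 8] = [3, 3, 6, -1, -2, -3, 10, 4];
    let a = unsafe { vld1q_s16(lo.as_ptr()) };
    let b = unsafe { vld1q_s16(hi.as_ptr()) };
    let r = vpmaxq_s16(a, b);
    let mut out = [0i16; 8];
    unsafe { vst1q_s16(out.as_mut_ptr(), r) };
    // max(1,9), max(-4,-8), max(5,2), max(0,7), then the pairs of `b`.
    assert_eq!(out, [9, -4, 5, 7, 3, 6, -2, 10]);
}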
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f16"
        )]
        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmin_f16(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v8f16"
        )]
        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpminq_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
        )]
        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpminnm_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
        )]
        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpminnmq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vpminq_u16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v4i32"
        )]
        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vpminq_u32(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminqd_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vpminqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpminqd_f64(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmins_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vpmins_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmins_f32(a) }
}
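// The minimum family mirrors the maximum family above; the scalar forms
// reduce a single two-lane vector, and the "number" (NM) forms again prefer
// a number over a quiet NaN. A minimal sketch (hypothetical helper name,
// assumes NEON):
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn pairwise_min_scalar_sketch() {
    let data: [f32; 2] = [2.5, -1.5];
    let v = unsafe { vld1_f32(data.as_ptr()) };
    // vpmins_f32 returns min(lane0, lane1).
    assert_eq!(vpmins_f32(v), -1.5);
}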
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    unsafe { _vqabsd_s64(a) }
}
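// What "saturating" buys you here: plain two's-complement `abs` has one
// overflowing input per width (e.g. i8::MIN, whose positive counterpart does
// not fit), and SQABS clamps that case to the maximum instead of wrapping.
// A minimal sketch (hypothetical helper name, assumes NEON):
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn saturating_abs_sketch() {
    assert_eq!(vqabsb_s8(-5), 5); // ordinary case
    assert_eq!(vqabsb_s8(i8::MIN), i8::MAX); // clamped, not wrapped
    assert_eq!(vqabsd_s64(i64::MIN), i64::MAX);
}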
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqaddd_u64(a, b) }
}
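// Saturating add clamps to the type's range instead of wrapping. A minimal
// sketch (hypothetical helper name, assumes NEON):
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn saturating_add_sketch() {
    assert_eq!(vqaddb_s8(100, 100), 127); // would wrap to -56 without saturation
    assert_eq!(vqaddb_u8(200, 100), 255);
    assert_eq!(vqaddd_u64(u64::MAX, 1), u64::MAX);
}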
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
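// The `_high_` forms widen from the upper halves of the 128-bit inputs:
// each result lane is sat(a[i] + 2 * widen(b_hi[i]) * widen(c_hi[i])). A
// minimal sketch (hypothetical helper name, assumes NEON):
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn qdmlal_high_sketch() {
    let acc: [i32; 4] = [1, 1, 1, 1];
    let bs: [i16; 8] = [0, 0, 0, 0, 1, 2, 3, 4]; // only the upper half is used
    let cs: [i16; 8] = [9, 9, 9, 9, 5, 5, 5, 5];
    let a = unsafe { vld1q_s32(acc.as_ptr()) };
    let b = unsafe { vld1q_s16(bs.as_ptr()) };
    let c = unsafe { vld1q_s16(cs.as_ptr()) };
    // Lanes: 1 + 2*1*5, 1 + 2*2*5, 1 + 2*3*5, 1 + 2*4*5.
    let r = vqdmlal_high_s16(a, b, c);
    let mut out = [0i32; 4];
    unsafe { vst1q_s32(out.as_mut_ptr(), r) };
    assert_eq!(out, [11, 21, 31, 41]);
}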
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
    vqaddd_s64(a, vqdmulls_s32(b, c))
}
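// The scalar form computes sat(a + 2*b*c) into the widened type; both the
// doubling product and the accumulate saturate. A minimal sketch
// (hypothetical helper name, assumes NEON):
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn qdmlal_scalar_sketch() {
    assert_eq!(vqdmlalh_s16(10, 2, 3), 22); // 10 + 2*2*3
    // Accumulating past i32::MAX clamps rather than wrapping.
    assert_eq!(vqdmlalh_s16(i32::MAX, 1, 1), i32::MAX);
}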
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 1);
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
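// The multiply-subtract twin of the family above: sat(a - 2*b*c), shown here
// through the scalar forms (defined further below). Hypothetical helper
// name, assumes NEON.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn qdmlsl_scalar_sketch() {
    assert_eq!(vqdmlslh_s16(10, 2, 3), -2); // 10 - 2*2*3
    // Subtracting below i64::MIN clamps rather than wrapping.
    assert_eq!(vqdmlsls_s32(i64::MIN, 1, 1), i64::MIN);
}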
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
16915#[inline]
16916#[target_feature(enable = "neon")]
16917#[cfg_attr(test, assert_instr(sqdmlsl))]
16918#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16919pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
16920    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
16921    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
16922}
16923#[doc = "Signed saturating doubling multiply-subtract long"]
16924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
16925#[inline]
16926#[target_feature(enable = "neon")]
16927#[cfg_attr(test, assert_instr(sqdmlsl))]
16928#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16929pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
16930    vqsubd_s64(a, vqdmulls_s32(b, c))
16932}
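// Editor's sketch (not part of the generated file): the scalar forms apply
// the same sat(a - sat(2 * b * c)) recipe to single elements, and the outer
// subtraction saturates rather than wraps. Hypothetical test name.
#[cfg(test)]
mod vqdmlsl_scalar_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqdmlslh_s16() {
        // 100 - 2 * 3 * 4 = 76.
        assert_eq!(vqdmlslh_s16(100, 3, 4), 76);
        // i32::MIN - 2 saturates instead of wrapping.
        assert_eq!(vqdmlslh_s16(i32::MIN, 1, 1), i32::MIN);
    }
}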
16933#[doc = "Vector saturating doubling multiply high by scalar"]
16934#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
16935#[inline]
16936#[target_feature(enable = "neon")]
16937#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
16938#[rustc_legacy_const_generics(2)]
16939#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16940pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
16941    static_assert_uimm_bits!(LANE, 2);
16942    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
16943}
16944#[doc = "Vector saturating doubling multiply high by scalar"]
16945#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
16946#[inline]
16947#[target_feature(enable = "neon")]
16948#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
16949#[rustc_legacy_const_generics(2)]
16950#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16951pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
16952    static_assert_uimm_bits!(LANE, 2);
16953    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
16954}
16955#[doc = "Vector saturating doubling multiply high by scalar"]
16956#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
16957#[inline]
16958#[target_feature(enable = "neon")]
16959#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
16960#[rustc_legacy_const_generics(2)]
16961#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16962pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
16963    static_assert_uimm_bits!(LANE, 1);
16964    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
16965}
16966#[doc = "Vector saturating doubling multiply high by scalar"]
16967#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
16968#[inline]
16969#[target_feature(enable = "neon")]
16970#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
16971#[rustc_legacy_const_generics(2)]
16972#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16973pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
16974    static_assert_uimm_bits!(LANE, 1);
16975    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
16976}
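// Editor's sketch (not part of the generated file): vqdmulh*_lane_* multiplies
// every lane of `a` by one broadcast lane of `b` and keeps the high half of
// the doubled product, i.e. Q15/Q31 fixed-point multiplication. Hypothetical
// test name.
#[cfg(test)]
mod vqdmulh_lane_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqdmulh_lane_s16() {
        let a = vdup_n_s16(16384); // 0.5 in Q15
        let b = vdup_n_s16(8192);  // 0.25 in Q15
        // 0.5 * 0.25 = 0.125 in Q15: (2 * 16384 * 8192) >> 16 = 4096.
        let r = vqdmulh_lane_s16::<0>(a, b);
        assert_eq!(vget_lane_s16::<0>(r), 4096);
    }
}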
16977#[doc = "Signed saturating doubling multiply returning high half"]
16978#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
16979#[inline]
16980#[target_feature(enable = "neon")]
16981#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
16982#[rustc_legacy_const_generics(2)]
16983#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16984pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
16985    static_assert_uimm_bits!(N, 2);
16986    unsafe {
16987        let b: i16 = simd_extract!(b, N as u32);
16988        vqdmulhh_s16(a, b)
16989    }
16990}
16991#[doc = "Signed saturating doubling multiply returning high half"]
16992#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
16993#[inline]
16994#[target_feature(enable = "neon")]
16995#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
16996#[rustc_legacy_const_generics(2)]
16997#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16998pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
16999    static_assert_uimm_bits!(N, 3);
17000    unsafe {
17001        let b: i16 = simd_extract!(b, N as u32);
17002        vqdmulhh_s16(a, b)
17003    }
17004}
17005#[doc = "Signed saturating doubling multiply returning high half"]
17006#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
17007#[inline]
17008#[target_feature(enable = "neon")]
17009#[cfg_attr(test, assert_instr(sqdmulh))]
17010#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17011pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
17012    let a: int16x4_t = vdup_n_s16(a);
17013    let b: int16x4_t = vdup_n_s16(b);
17014    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
17015}
17016#[doc = "Signed saturating doubling multiply returning high half"]
17017#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
17018#[inline]
17019#[target_feature(enable = "neon")]
17020#[cfg_attr(test, assert_instr(sqdmulh))]
17021#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17022pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
17023    let a: int32x2_t = vdup_n_s32(a);
17024    let b: int32x2_t = vdup_n_s32(b);
17025    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
17026}
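// Editor's sketch (not part of the generated file): in the scalar high-half
// multiplies the only overflowing input pair is MIN * MIN, which saturates
// the doubled product and so pins the result at the type's MAX.
#[cfg(test)]
mod vqdmulh_scalar_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqdmulhh_s16() {
        // (2 * 0x4000 * 0x4000) >> 16 = 0x2000.
        assert_eq!(vqdmulhh_s16(0x4000, 0x4000), 0x2000);
        // 2 * (-32768)^2 saturates to i32::MAX; its high half is i16::MAX.
        assert_eq!(vqdmulhh_s16(i16::MIN, i16::MIN), i16::MAX);
    }
}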
17027#[doc = "Signed saturating doubling multiply returning high half"]
17028#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
17029#[inline]
17030#[target_feature(enable = "neon")]
17031#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
17032#[rustc_legacy_const_generics(2)]
17033#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17034pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
17035    static_assert_uimm_bits!(N, 1);
17036    unsafe {
17037        let b: i32 = simd_extract!(b, N as u32);
17038        vqdmulhs_s32(a, b)
17039    }
17040}
17041#[doc = "Signed saturating doubling multiply returning high half"]
17042#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
17043#[inline]
17044#[target_feature(enable = "neon")]
17045#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
17046#[rustc_legacy_const_generics(2)]
17047#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17048pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
17049    static_assert_uimm_bits!(N, 2);
17050    unsafe {
17051        let b: i32 = simd_extract!(b, N as u32);
17052        vqdmulhs_s32(a, b)
17053    }
17054}
17055#[doc = "Signed saturating doubling multiply long"]
17056#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
17057#[inline]
17058#[target_feature(enable = "neon")]
17059#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
17060#[rustc_legacy_const_generics(2)]
17061#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17062pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
17063    static_assert_uimm_bits!(N, 2);
17064    unsafe {
17065        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
17066        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
17067        vqdmull_s16(a, b)
17068    }
17069}
17070#[doc = "Signed saturating doubling multiply long"]
17071#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
17072#[inline]
17073#[target_feature(enable = "neon")]
17074#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
17075#[rustc_legacy_const_generics(2)]
17076#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17077pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
17078    static_assert_uimm_bits!(N, 2);
17079    unsafe {
17080        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
17081        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
17082        vqdmull_s32(a, b)
17083    }
17084}
17085#[doc = "Signed saturating doubling multiply long"]
17086#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
17087#[inline]
17088#[target_feature(enable = "neon")]
17089#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
17090#[rustc_legacy_const_generics(2)]
17091#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17092pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
17093    static_assert_uimm_bits!(N, 1);
17094    unsafe {
17095        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
17096        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
17097        vqdmull_s32(a, b)
17098    }
17099}
17100#[doc = "Signed saturating doubling multiply long"]
17101#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
17102#[inline]
17103#[target_feature(enable = "neon")]
17104#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
17105#[rustc_legacy_const_generics(2)]
17106#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17107pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
17108    static_assert_uimm_bits!(N, 3);
17109    unsafe {
17110        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
17111        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
17112        vqdmull_s16(a, b)
17113    }
17114}
17115#[doc = "Signed saturating doubling multiply long"]
17116#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
17117#[inline]
17118#[target_feature(enable = "neon")]
17119#[cfg_attr(test, assert_instr(sqdmull2))]
17120#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17121pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
17122    unsafe {
17123        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
17124        let b: int16x4_t = vdup_n_s16(b);
17125        vqdmull_s16(a, b)
17126    }
17127}
17128#[doc = "Signed saturating doubling multiply long"]
17129#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
17130#[inline]
17131#[target_feature(enable = "neon")]
17132#[cfg_attr(test, assert_instr(sqdmull2))]
17133#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17134pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
17135    unsafe {
17136        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
17137        let b: int32x2_t = vdup_n_s32(b);
17138        vqdmull_s32(a, b)
17139    }
17140}
17141#[doc = "Signed saturating doubling multiply long"]
17142#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
17143#[inline]
17144#[target_feature(enable = "neon")]
17145#[cfg_attr(test, assert_instr(sqdmull2))]
17146#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17147pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
17148    unsafe {
17149        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
17150        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
17151        vqdmull_s16(a, b)
17152    }
17153}
17154#[doc = "Signed saturating doubling multiply long"]
17155#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
17156#[inline]
17157#[target_feature(enable = "neon")]
17158#[cfg_attr(test, assert_instr(sqdmull2))]
17159#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17160pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
17161    unsafe {
17162        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
17163        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
17164        vqdmull_s32(a, b)
17165    }
17166}
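// Editor's sketch (not part of the generated file): vqdmull_high_* widens as
// it multiplies, so doubled products that would overflow the narrow element
// type are exactly representable in the result. Hypothetical test name.
#[cfg(test)]
mod vqdmull_high_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqdmull_high_s16() {
        let b = vdupq_n_s16(1000);
        let c = vdupq_n_s16(1000);
        // Each i32 lane: 2 * 1000 * 1000 = 2_000_000, far beyond i16 range.
        let r = vqdmull_high_s16(b, c);
        assert_eq!(vgetq_lane_s32::<0>(r), 2_000_000);
    }
}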
17167#[doc = "Vector saturating doubling long multiply by scalar"]
17168#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
17169#[inline]
17170#[target_feature(enable = "neon")]
17171#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
17172#[rustc_legacy_const_generics(2)]
17173#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17174pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
17175    static_assert_uimm_bits!(N, 3);
17176    unsafe {
17177        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
17178        vqdmull_s16(a, b)
17179    }
17180}
17181#[doc = "Vector saturating doubling long multiply by scalar"]
17182#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
17183#[inline]
17184#[target_feature(enable = "neon")]
17185#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
17186#[rustc_legacy_const_generics(2)]
17187#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17188pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
17189    static_assert_uimm_bits!(N, 2);
17190    unsafe {
17191        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
17192        vqdmull_s32(a, b)
17193    }
17194}
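// Editor's sketch (not part of the generated file): the laneq forms pick the
// multiplier from a 128-bit vector, so N may index all eight i16 lanes (or
// all four i32 lanes). Hypothetical test name.
#[cfg(test)]
mod vqdmull_laneq_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqdmull_laneq_s16() {
        let a = vdup_n_s16(3);
        let b = vdupq_n_s16(5);
        // Lane 7 of `b` is 5, so every result lane is 2 * 3 * 5 = 30.
        let r = vqdmull_laneq_s16::<7>(a, b);
        assert_eq!(vgetq_lane_s32::<0>(r), 30);
    }
}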
17195#[doc = "Signed saturating doubling multiply long"]
17196#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
17197#[inline]
17198#[target_feature(enable = "neon")]
17199#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
17200#[rustc_legacy_const_generics(2)]
17201#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17202pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
17203    static_assert_uimm_bits!(N, 2);
17204    unsafe {
17205        let b: i16 = simd_extract!(b, N as u32);
17206        vqdmullh_s16(a, b)
17207    }
17208}
17209#[doc = "Signed saturating doubling multiply long"]
17210#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
17211#[inline]
17212#[target_feature(enable = "neon")]
17213#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
17214#[rustc_legacy_const_generics(2)]
17215#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17216pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
17217    static_assert_uimm_bits!(N, 2);
17218    unsafe {
17219        let b: i32 = simd_extract!(b, N as u32);
17220        vqdmulls_s32(a, b)
17221    }
17222}
17223#[doc = "Signed saturating doubling multiply long"]
17224#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
17225#[inline]
17226#[target_feature(enable = "neon")]
17227#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
17228#[rustc_legacy_const_generics(2)]
17229#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17230pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
17231    static_assert_uimm_bits!(N, 3);
17232    unsafe {
17233        let b: i16 = simd_extract!(b, N as u32);
17234        vqdmullh_s16(a, b)
17235    }
17236}
17237#[doc = "Signed saturating doubling multiply long"]
17238#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
17239#[inline]
17240#[target_feature(enable = "neon")]
17241#[cfg_attr(test, assert_instr(sqdmull))]
17242#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17243pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
17244    let a: int16x4_t = vdup_n_s16(a);
17245    let b: int16x4_t = vdup_n_s16(b);
17246    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
17247}
17248#[doc = "Signed saturating doubling multiply long"]
17249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
17250#[inline]
17251#[target_feature(enable = "neon")]
17252#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
17253#[rustc_legacy_const_generics(2)]
17254#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17255pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
17256    static_assert_uimm_bits!(N, 1);
17257    unsafe {
17258        let b: i32 = simd_extract!(b, N as u32);
17259        vqdmulls_s32(a, b)
17260    }
17261}
17262#[doc = "Signed saturating doubling multiply long"]
17263#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
17264#[inline]
17265#[target_feature(enable = "neon")]
17266#[cfg_attr(test, assert_instr(sqdmull))]
17267#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17268pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
17269    unsafe extern "unadjusted" {
17270        #[cfg_attr(
17271            any(target_arch = "aarch64", target_arch = "arm64ec"),
17272            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
17273        )]
17274        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
17275    }
17276    unsafe { _vqdmulls_s32(a, b) }
17277}
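// Editor's sketch (not part of the generated file): for vqdmulls_s32 the sole
// saturating input pair is (i32::MIN, i32::MIN), since doubling 2^62 exceeds
// i64::MAX.
#[cfg(test)]
mod vqdmulls_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqdmulls_s32() {
        assert_eq!(vqdmulls_s32(3, 4), 24);
        assert_eq!(vqdmulls_s32(i32::MIN, i32::MIN), i64::MAX);
    }
}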
17278#[doc = "Signed saturating extract narrow"]
17279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
17280#[inline]
17281#[target_feature(enable = "neon")]
17282#[cfg_attr(test, assert_instr(sqxtn2))]
17283#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17284pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
17285    unsafe {
17286        simd_shuffle!(
17287            a,
17288            vqmovn_s16(b),
17289            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
17290        )
17291    }
17292}
17293#[doc = "Signed saturating extract narrow"]
17294#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
17295#[inline]
17296#[target_feature(enable = "neon")]
17297#[cfg_attr(test, assert_instr(sqxtn2))]
17298#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17299pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
17300    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
17301}
17302#[doc = "Signed saturating extract narrow"]
17303#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
17304#[inline]
17305#[target_feature(enable = "neon")]
17306#[cfg_attr(test, assert_instr(sqxtn2))]
17307#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17308pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
17309    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
17310}
17311#[doc = "Signed saturating extract narrow"]
17312#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
17313#[inline]
17314#[target_feature(enable = "neon")]
17315#[cfg_attr(test, assert_instr(uqxtn2))]
17316#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17317pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
17318    unsafe {
17319        simd_shuffle!(
17320            a,
17321            vqmovn_u16(b),
17322            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
17323        )
17324    }
17325}
17326#[doc = "Signed saturating extract narrow"]
17327#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
17328#[inline]
17329#[target_feature(enable = "neon")]
17330#[cfg_attr(test, assert_instr(uqxtn2))]
17331#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17332pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
17333    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
17334}
17335#[doc = "Signed saturating extract narrow"]
17336#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
17337#[inline]
17338#[target_feature(enable = "neon")]
17339#[cfg_attr(test, assert_instr(uqxtn2))]
17340#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17341pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
17342    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
17343}
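// Editor's sketch (not part of the generated file): vqmovn_high_* keeps `a`
// as the low half of the result and appends the saturating-narrowed lanes of
// `b` as the high half. Hypothetical test name.
#[cfg(test)]
mod vqmovn_high_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqmovn_high_s16() {
        let low = vdup_n_s8(1);
        let wide = vdupq_n_s16(300); // out of i8 range, saturates to 127
        let r = vqmovn_high_s16(low, wide);
        assert_eq!(vgetq_lane_s8::<0>(r), 1); // low half kept as-is
        assert_eq!(vgetq_lane_s8::<8>(r), 127); // narrowed, saturated half
    }
}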
17344#[doc = "Saturating extract narrow"]
17345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
17346#[inline]
17347#[target_feature(enable = "neon")]
17348#[cfg_attr(test, assert_instr(sqxtn))]
17349#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17350pub fn vqmovnd_s64(a: i64) -> i32 {
17351    unsafe extern "unadjusted" {
17352        #[cfg_attr(
17353            any(target_arch = "aarch64", target_arch = "arm64ec"),
17354            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
17355        )]
17356        fn _vqmovnd_s64(a: i64) -> i32;
17357    }
17358    unsafe { _vqmovnd_s64(a) }
17359}
17360#[doc = "Saturating extract narrow"]
17361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
17362#[inline]
17363#[target_feature(enable = "neon")]
17364#[cfg_attr(test, assert_instr(uqxtn))]
17365#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17366pub fn vqmovnd_u64(a: u64) -> u32 {
17367    unsafe extern "unadjusted" {
17368        #[cfg_attr(
17369            any(target_arch = "aarch64", target_arch = "arm64ec"),
17370            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
17371        )]
17372        fn _vqmovnd_u64(a: u64) -> u32;
17373    }
17374    unsafe { _vqmovnd_u64(a) }
17375}
17376#[doc = "Saturating extract narrow"]
17377#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
17378#[inline]
17379#[target_feature(enable = "neon")]
17380#[cfg_attr(test, assert_instr(sqxtn))]
17381#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17382pub fn vqmovnh_s16(a: i16) -> i8 {
17383    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
17384}
17385#[doc = "Saturating extract narrow"]
17386#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
17387#[inline]
17388#[target_feature(enable = "neon")]
17389#[cfg_attr(test, assert_instr(sqxtn))]
17390#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17391pub fn vqmovns_s32(a: i32) -> i16 {
17392    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
17393}
17394#[doc = "Saturating extract narrow"]
17395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
17396#[inline]
17397#[target_feature(enable = "neon")]
17398#[cfg_attr(test, assert_instr(uqxtn))]
17399#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17400pub fn vqmovnh_u16(a: u16) -> u8 {
17401    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
17402}
17403#[doc = "Saturating extract narrow"]
17404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
17405#[inline]
17406#[target_feature(enable = "neon")]
17407#[cfg_attr(test, assert_instr(uqxtn))]
17408#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17409pub fn vqmovns_u32(a: u32) -> u16 {
17410    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
17411}
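// Editor's sketch (not part of the generated file): the scalar narrows clamp
// to the destination type's range instead of truncating bits.
#[cfg(test)]
mod vqmovn_scalar_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqmovnh() {
        assert_eq!(vqmovnh_s16(42), 42);
        assert_eq!(vqmovnh_s16(300), i8::MAX);
        assert_eq!(vqmovnh_s16(-300), i8::MIN);
        assert_eq!(vqmovnh_u16(300), u8::MAX);
    }
}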
17412#[doc = "Signed saturating extract unsigned narrow"]
17413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
17414#[inline]
17415#[target_feature(enable = "neon")]
17416#[cfg_attr(test, assert_instr(sqxtun2))]
17417#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17418pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
17419    unsafe {
17420        simd_shuffle!(
17421            a,
17422            vqmovun_s16(b),
17423            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
17424        )
17425    }
17426}
17427#[doc = "Signed saturating extract unsigned narrow"]
17428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
17429#[inline]
17430#[target_feature(enable = "neon")]
17431#[cfg_attr(test, assert_instr(sqxtun2))]
17432#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17433pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
17434    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
17435}
17436#[doc = "Signed saturating extract unsigned narrow"]
17437#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
17438#[inline]
17439#[target_feature(enable = "neon")]
17440#[cfg_attr(test, assert_instr(sqxtun2))]
17441#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17442pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
17443    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
17444}
17445#[doc = "Signed saturating extract unsigned narrow"]
17446#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
17447#[inline]
17448#[target_feature(enable = "neon")]
17449#[cfg_attr(test, assert_instr(sqxtun))]
17450#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17451pub fn vqmovunh_s16(a: i16) -> u8 {
17452    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
17453}
17454#[doc = "Signed saturating extract unsigned narrow"]
17455#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
17456#[inline]
17457#[target_feature(enable = "neon")]
17458#[cfg_attr(test, assert_instr(sqxtun))]
17459#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17460pub fn vqmovuns_s32(a: i32) -> u16 {
17461    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
17462}
17463#[doc = "Signed saturating extract unsigned narrow"]
17464#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
17465#[inline]
17466#[target_feature(enable = "neon")]
17467#[cfg_attr(test, assert_instr(sqxtun))]
17468#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17469pub fn vqmovund_s64(a: i64) -> u32 {
17470    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
17471}
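// Editor's sketch (not part of the generated file): the "unsigned narrow"
// forms take signed input and clamp it into the unsigned range, so negative
// values become 0.
#[cfg(test)]
mod vqmovun_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqmovunh_s16() {
        assert_eq!(vqmovunh_s16(-5), 0);
        assert_eq!(vqmovunh_s16(42), 42);
        assert_eq!(vqmovunh_s16(300), u8::MAX);
    }
}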
17472#[doc = "Signed saturating negate"]
17473#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
17474#[inline]
17475#[target_feature(enable = "neon")]
17476#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17477#[cfg_attr(test, assert_instr(sqneg))]
17478pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
17479    unsafe extern "unadjusted" {
17480        #[cfg_attr(
17481            any(target_arch = "aarch64", target_arch = "arm64ec"),
17482            link_name = "llvm.aarch64.neon.sqneg.v1i64"
17483        )]
17484        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
17485    }
17486    unsafe { _vqneg_s64(a) }
17487}
17488#[doc = "Signed saturating negate"]
17489#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
17490#[inline]
17491#[target_feature(enable = "neon")]
17492#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17493#[cfg_attr(test, assert_instr(sqneg))]
17494pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
17495    unsafe extern "unadjusted" {
17496        #[cfg_attr(
17497            any(target_arch = "aarch64", target_arch = "arm64ec"),
17498            link_name = "llvm.aarch64.neon.sqneg.v2i64"
17499        )]
17500        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
17501    }
17502    unsafe { _vqnegq_s64(a) }
17503}
17504#[doc = "Signed saturating negate"]
17505#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
17506#[inline]
17507#[target_feature(enable = "neon")]
17508#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17509#[cfg_attr(test, assert_instr(sqneg))]
17510pub fn vqnegb_s8(a: i8) -> i8 {
17511    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
17512}
17513#[doc = "Signed saturating negate"]
17514#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
17515#[inline]
17516#[target_feature(enable = "neon")]
17517#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17518#[cfg_attr(test, assert_instr(sqneg))]
17519pub fn vqnegh_s16(a: i16) -> i16 {
17520    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
17521}
17522#[doc = "Signed saturating negate"]
17523#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
17524#[inline]
17525#[target_feature(enable = "neon")]
17526#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17527#[cfg_attr(test, assert_instr(sqneg))]
17528pub fn vqnegs_s32(a: i32) -> i32 {
17529    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
17530}
17531#[doc = "Signed saturating negate"]
17532#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
17533#[inline]
17534#[target_feature(enable = "neon")]
17535#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17536#[cfg_attr(test, assert_instr(sqneg))]
17537pub fn vqnegd_s64(a: i64) -> i64 {
17538    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
17539}
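// Editor's sketch (not part of the generated file): plain two's-complement
// negation wraps on MIN (there is no +2^63), whereas the saturating negate
// pins it at MAX.
#[cfg(test)]
mod vqneg_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn sketch_vqnegd_s64() {
        assert_eq!(vqnegd_s64(5), -5);
        assert_eq!(vqnegd_s64(i64::MIN), i64::MAX);
    }
}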
17540#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17541#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
17542#[inline]
17543#[target_feature(enable = "rdm")]
17544#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17545#[rustc_legacy_const_generics(3)]
17546#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17547pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
17548    static_assert_uimm_bits!(LANE, 2);
17549    unsafe {
17550        let c: int16x4_t =
17551            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
17552        vqrdmlah_s16(a, b, c)
17553    }
17554}
17555#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17556#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
17557#[inline]
17558#[target_feature(enable = "rdm")]
17559#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17560#[rustc_legacy_const_generics(3)]
17561#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17562pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
17563    static_assert_uimm_bits!(LANE, 1);
17564    unsafe {
17565        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
17566        vqrdmlah_s32(a, b, c)
17567    }
17568}
17569#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17570#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
17571#[inline]
17572#[target_feature(enable = "rdm")]
17573#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17574#[rustc_legacy_const_generics(3)]
17575#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17576pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
17577    static_assert_uimm_bits!(LANE, 3);
17578    unsafe {
17579        let c: int16x4_t =
17580            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
17581        vqrdmlah_s16(a, b, c)
17582    }
17583}
17584#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17585#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
17586#[inline]
17587#[target_feature(enable = "rdm")]
17588#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17589#[rustc_legacy_const_generics(3)]
17590#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17591pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
17592    static_assert_uimm_bits!(LANE, 2);
17593    unsafe {
17594        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
17595        vqrdmlah_s32(a, b, c)
17596    }
17597}
17598#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17599#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
17600#[inline]
17601#[target_feature(enable = "rdm")]
17602#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17603#[rustc_legacy_const_generics(3)]
17604#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17605pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
17606    static_assert_uimm_bits!(LANE, 2);
17607    unsafe {
17608        let c: int16x8_t = simd_shuffle!(
17609            c,
17610            c,
17611            [
17612                LANE as u32,
17613                LANE as u32,
17614                LANE as u32,
17615                LANE as u32,
17616                LANE as u32,
17617                LANE as u32,
17618                LANE as u32,
17619                LANE as u32
17620            ]
17621        );
17622        vqrdmlahq_s16(a, b, c)
17623    }
17624}
17625#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17626#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
17627#[inline]
17628#[target_feature(enable = "rdm")]
17629#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17630#[rustc_legacy_const_generics(3)]
17631#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17632pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
17633    static_assert_uimm_bits!(LANE, 1);
17634    unsafe {
17635        let c: int32x4_t =
17636            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
17637        vqrdmlahq_s32(a, b, c)
17638    }
17639}
17640#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17641#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
17642#[inline]
17643#[target_feature(enable = "rdm")]
17644#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17645#[rustc_legacy_const_generics(3)]
17646#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17647pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
17648    static_assert_uimm_bits!(LANE, 3);
17649    unsafe {
17650        let c: int16x8_t = simd_shuffle!(
17651            c,
17652            c,
17653            [
17654                LANE as u32,
17655                LANE as u32,
17656                LANE as u32,
17657                LANE as u32,
17658                LANE as u32,
17659                LANE as u32,
17660                LANE as u32,
17661                LANE as u32
17662            ]
17663        );
17664        vqrdmlahq_s16(a, b, c)
17665    }
17666}
17667#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17668#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
17669#[inline]
17670#[target_feature(enable = "rdm")]
17671#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17672#[rustc_legacy_const_generics(3)]
17673#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17674pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
17675    static_assert_uimm_bits!(LANE, 2);
17676    unsafe {
17677        let c: int32x4_t =
17678            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
17679        vqrdmlahq_s32(a, b, c)
17680    }
17681}
17682#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17683#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
17684#[inline]
17685#[target_feature(enable = "rdm")]
17686#[cfg_attr(test, assert_instr(sqrdmlah))]
17687#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17688pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
17689    unsafe extern "unadjusted" {
17690        #[cfg_attr(
17691            any(target_arch = "aarch64", target_arch = "arm64ec"),
17692            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
17693        )]
17694        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
17695    }
17696    unsafe { _vqrdmlah_s16(a, b, c) }
17697}
17698#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17699#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
17700#[inline]
17701#[target_feature(enable = "rdm")]
17702#[cfg_attr(test, assert_instr(sqrdmlah))]
17703#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17704pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
17705    unsafe extern "unadjusted" {
17706        #[cfg_attr(
17707            any(target_arch = "aarch64", target_arch = "arm64ec"),
17708            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
17709        )]
17710        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
17711    }
17712    unsafe { _vqrdmlahq_s16(a, b, c) }
17713}
17714#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17715#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
17716#[inline]
17717#[target_feature(enable = "rdm")]
17718#[cfg_attr(test, assert_instr(sqrdmlah))]
17719#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17720pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
17721    unsafe extern "unadjusted" {
17722        #[cfg_attr(
17723            any(target_arch = "aarch64", target_arch = "arm64ec"),
17724            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
17725        )]
17726        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
17727    }
17728    unsafe { _vqrdmlah_s32(a, b, c) }
17729}
17730#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
17732#[inline]
17733#[target_feature(enable = "rdm")]
17734#[cfg_attr(test, assert_instr(sqrdmlah))]
17735#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17736pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
17737    unsafe extern "unadjusted" {
17738        #[cfg_attr(
17739            any(target_arch = "aarch64", target_arch = "arm64ec"),
17740            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
17741        )]
17742        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
17743    }
17744    unsafe { _vqrdmlahq_s32(a, b, c) }
17745}
17746#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
17748#[inline]
17749#[target_feature(enable = "rdm")]
17750#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17751#[rustc_legacy_const_generics(3)]
17752#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17753pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
17754    static_assert_uimm_bits!(LANE, 2);
17755    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
17756}
17757#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17758#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
17759#[inline]
17760#[target_feature(enable = "rdm")]
17761#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17762#[rustc_legacy_const_generics(3)]
17763#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17764pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
17765    static_assert_uimm_bits!(LANE, 3);
17766    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
17767}
17768#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17769#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
17770#[inline]
17771#[target_feature(enable = "rdm")]
17772#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17773#[rustc_legacy_const_generics(3)]
17774#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17775pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
17776    static_assert_uimm_bits!(LANE, 1);
17777    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
17778}
17779#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17780#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
17781#[inline]
17782#[target_feature(enable = "rdm")]
17783#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17784#[rustc_legacy_const_generics(3)]
17785#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17786pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
17787    static_assert_uimm_bits!(LANE, 2);
17788    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
17789}
17790#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17791#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
17792#[inline]
17793#[target_feature(enable = "rdm")]
17794#[cfg_attr(test, assert_instr(sqrdmlah))]
17795#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17796pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
17797    let a: int16x4_t = vdup_n_s16(a);
17798    let b: int16x4_t = vdup_n_s16(b);
17799    let c: int16x4_t = vdup_n_s16(c);
17800    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
17801}
17802#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
17804#[inline]
17805#[target_feature(enable = "rdm")]
17806#[cfg_attr(test, assert_instr(sqrdmlah))]
17807#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17808pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
17809    let a: int32x2_t = vdup_n_s32(a);
17810    let b: int32x2_t = vdup_n_s32(b);
17811    let c: int32x2_t = vdup_n_s32(c);
17812    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
17813}
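// Editor's sketch (not part of the generated file): the rounding variant adds
// 1 << 15 before taking the high half, so accumulating 0.5 * 0.5 in Q15 lands
// on exactly 8192. Needs the `rdm` feature at runtime; hypothetical test name.
#[cfg(test)]
mod vqrdmlah_sketch {
    use super::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "rdm")]
    unsafe fn sketch_vqrdmlahh_s16() {
        // 1 + round((2 * 16384 * 16384) >> 16) = 1 + 8192.
        assert_eq!(vqrdmlahh_s16(1, 16384, 16384), 8193);
    }
}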
17814#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
17815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
17816#[inline]
17817#[target_feature(enable = "rdm")]
17818#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
17819#[rustc_legacy_const_generics(3)]
17820#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17821pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
17822    static_assert_uimm_bits!(LANE, 2);
17823    unsafe {
17824        let c: int16x4_t =
17825            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
17826        vqrdmlsh_s16(a, b, c)
17827    }
17828}
17829#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
17830#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
17831#[inline]
17832#[target_feature(enable = "rdm")]
17833#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
17834#[rustc_legacy_const_generics(3)]
17835#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17836pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
17837    static_assert_uimm_bits!(LANE, 1);
17838    unsafe {
17839        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
17840        vqrdmlsh_s32(a, b, c)
17841    }
17842}
17843#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
17844#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
17845#[inline]
17846#[target_feature(enable = "rdm")]
17847#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
17848#[rustc_legacy_const_generics(3)]
17849#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17850pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
17851    static_assert_uimm_bits!(LANE, 3);
17852    unsafe {
17853        let c: int16x4_t =
17854            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
17855        vqrdmlsh_s16(a, b, c)
17856    }
17857}
17858#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
17859#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
17860#[inline]
17861#[target_feature(enable = "rdm")]
17862#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
17863#[rustc_legacy_const_generics(3)]
17864#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17865pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
17866    static_assert_uimm_bits!(LANE, 2);
17867    unsafe {
17868        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
17869        vqrdmlsh_s32(a, b, c)
17870    }
17871}
17872#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
17873#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
17874#[inline]
17875#[target_feature(enable = "rdm")]
17876#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
17877#[rustc_legacy_const_generics(3)]
17878#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17879pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
17880    static_assert_uimm_bits!(LANE, 2);
17881    unsafe {
17882        let c: int16x8_t = simd_shuffle!(
17883            c,
17884            c,
17885            [
17886                LANE as u32,
17887                LANE as u32,
17888                LANE as u32,
17889                LANE as u32,
17890                LANE as u32,
17891                LANE as u32,
17892                LANE as u32,
17893                LANE as u32
17894            ]
17895        );
17896        vqrdmlshq_s16(a, b, c)
17897    }
17898}
17899#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
17900#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
17901#[inline]
17902#[target_feature(enable = "rdm")]
17903#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
17904#[rustc_legacy_const_generics(3)]
17905#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17906pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
17907    static_assert_uimm_bits!(LANE, 1);
17908    unsafe {
17909        let c: int32x4_t =
17910            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
17911        vqrdmlshq_s32(a, b, c)
17912    }
17913}
17914#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
17915#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
17916#[inline]
17917#[target_feature(enable = "rdm")]
17918#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
17919#[rustc_legacy_const_generics(3)]
17920#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17921pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
17922    static_assert_uimm_bits!(LANE, 3);
17923    unsafe {
17924        let c: int16x8_t = simd_shuffle!(
17925            c,
17926            c,
17927            [
17928                LANE as u32,
17929                LANE as u32,
17930                LANE as u32,
17931                LANE as u32,
17932                LANE as u32,
17933                LANE as u32,
17934                LANE as u32,
17935                LANE as u32
17936            ]
17937        );
17938        vqrdmlshq_s16(a, b, c)
17939    }
17940}
17941#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
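// Editorial sketch, not generated output: a plain-integer reference model for
// the scalar sqrdmlsh semantics used above, assuming the Arm pseudocode
// sat(((a << 16) - 2*b*c + (1 << 15)) >> 16). The helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
fn vqrdmlshh_s16_model(a: i16, b: i16, c: i16) -> i16 {
    // Widen to i64 so the doubled product and the rounding constant cannot
    // themselves overflow before the saturating narrow back to i16.
    let acc = ((a as i64) << 16) - 2 * (b as i64) * (c as i64) + (1 << 15);
    (acc >> 16).clamp(i16::MIN as i64, i16::MAX as i64) as i16
}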
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
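// Editorial sketch, not generated output: the doubled product only overflows
// the high half for i16::MIN * i16::MIN, which is assumed to clamp to
// i16::MAX rather than wrap. The function name is a hypothetical example.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrdmulhh_s16() {
    assert_eq!(vqrdmulhh_s16(i16::MIN, i16::MIN), i16::MAX);
    // 2 * 0x4000 * 0x4000 = 1 << 29; the rounded high half is 0x2000.
    assert_eq!(vqrdmulhh_s16(0x4000, 0x4000), 0x2000);
}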
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
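// Editorial sketch, not generated output: for these rounding shifts a
// negative shift amount selects a rounding shift right, so discarded halves
// round away instead of truncating. The function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshlb_s8() {
    assert_eq!(vqrshlb_s8(3, -1), 2); // (3 + 1) >> 1, rounding up from 1.5
    assert_eq!(vqrshlb_s8(1, -1), 1); // 0.5 rounds up to 1
    assert_eq!(vqrshlb_s8(64, 1), i8::MAX); // 128 saturates
}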
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqrshld_u64(a, b) }
}
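// Editorial sketch, not generated output: the doubleword forms are assumed to
// behave identically at 64 bits, including the rounding on right shifts.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshld_u64() {
    assert_eq!(vqrshld_u64(1, -1), 1); // 0.5 rounds up to 1
    assert_eq!(vqrshld_u64(u64::MAX, 1), u64::MAX); // saturates on overflow
}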
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
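// Editorial sketch, not generated output: the `_high_` forms keep `a` as the
// low half of the result and append the narrowed `b` as the high half, which
// is what the shuffles above implement. The function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshrn_high_n_u16() {
    let low = vdup_n_u8(1);
    let wide = vdupq_n_u16(0x200);
    // (0x200 + (1 << 7)) >> 8 = 2 in every appended lane.
    let combined = vqrshrn_high_n_u16::<8>(low, wide);
    assert_eq!(vgetq_lane_u8::<0>(combined), 1);
    assert_eq!(vgetq_lane_u8::<15>(combined), 2);
}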
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
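// Editorial sketch, not generated output: in the scalar forms the rounding
// constant is added before the saturating narrow. The name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshrnh_n_s16() {
    assert_eq!(vqrshrnh_n_s16::<4>(1000), 63); // (1000 + 8) >> 4
    assert_eq!(vqrshrnh_n_s16::<1>(300), i8::MAX); // 150 saturates to 127
}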
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
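// Editorial sketch, not generated output: the signed-to-unsigned narrows
// clamp negative inputs to zero and saturate at the unsigned maximum.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqrshrunh_n_s16() {
    assert_eq!(vqrshrunh_n_s16::<4>(-100), 0); // negative clamps to 0
    assert_eq!(vqrshrunh_n_s16::<1>(600), u8::MAX); // 300 saturates to 255
}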
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
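// Editorial sketch, not generated output: the `_n_` forms shift left by a
// compile-time constant and saturate on overflow in either direction.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshlb_n_s8() {
    assert_eq!(vqshlb_n_s8::<1>(32), 64); // in range, exact
    assert_eq!(vqshlb_n_s8::<2>(48), i8::MAX); // 192 saturates
    assert_eq!(vqshlb_n_s8::<2>(-48), i8::MIN); // -192 saturates
}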
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
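// Editorial sketch, not generated output: unlike the rounding variants
// earlier in this file, these shifts truncate when the shift amount is
// negative (a right shift). The function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshlb_s8() {
    assert_eq!(vqshlb_s8(3, -1), 1); // 1.5 truncates to 1, no rounding
    assert_eq!(vqshlb_s8(-64, 2), i8::MIN); // -256 saturates
}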
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
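// Editorial sketch, not generated output: sqshlu reads a signed input and
// produces an unsigned result, so negative inputs are assumed to clamp to 0.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshlub_n_s8() {
    assert_eq!(vqshlub_n_s8::<1>(100), 200); // fits the wider unsigned range
    assert_eq!(vqshlub_n_s8::<1>(-1), 0); // negative clamps to 0
}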
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
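// Editorial sketch, not generated output: contrast with vqrshrnh_n_s16 above;
// without rounding the discarded low bits are simply truncated.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqshrnh_n_s16() {
    assert_eq!(vqshrnh_n_s16::<4>(1000), 62); // 1000 >> 4, vs 63 when rounding
    assert_eq!(vqshrnh_n_s16::<4>(i16::MAX), i8::MAX); // 2047 saturates
}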
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    static_assert!(N >= 1 && N <= 8);
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqsubd_u64(a, b) }
}
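// Editorial sketch, not generated output: the scalar saturating subtractions
// pin results at the type bounds instead of wrapping.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqsub_scalar() {
    assert_eq!(vqsubb_s8(i8::MIN, 1), i8::MIN); // stays at the signed minimum
    assert_eq!(vqsubb_u8(0, 1), 0); // unsigned underflow clamps to 0
    assert_eq!(vqsubd_u64(1, 2), 0);
}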
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
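// Editorial sketch, not generated output: single-table lookup over a 16-byte
// table; out-of-range indices are assumed to produce 0, per the TBL
// instruction. The function name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vqtbl1q_u8() {
    let table = vdupq_n_u8(7);
    assert_eq!(vgetq_lane_u8::<0>(vqtbl1q_u8(table, vdupq_n_u8(3))), 7);
    assert_eq!(vgetq_lane_u8::<0>(vqtbl1q_u8(table, vdupq_n_u8(16))), 0);
}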
19131#[doc = "Table look-up"]
19132#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
19133#[inline]
19134#[target_feature(enable = "neon")]
19135#[cfg_attr(test, assert_instr(tbl))]
19136#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19137fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
19138    unsafe extern "unadjusted" {
19139        #[cfg_attr(
19140            any(target_arch = "aarch64", target_arch = "arm64ec"),
19141            link_name = "llvm.aarch64.neon.tbl2.v8i8"
19142        )]
19143        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
19144    }
19145    unsafe { _vqtbl2(a, b, c) }
19146}
19147#[doc = "Table look-up"]
19148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
19149#[inline]
19150#[target_feature(enable = "neon")]
19151#[cfg_attr(test, assert_instr(tbl))]
19152#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19153fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
19154    unsafe extern "unadjusted" {
19155        #[cfg_attr(
19156            any(target_arch = "aarch64", target_arch = "arm64ec"),
19157            link_name = "llvm.aarch64.neon.tbl2.v16i8"
19158        )]
19159        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
19160    }
19161    unsafe { _vqtbl2q(a, b, c) }
19162}
19163#[doc = "Table look-up"]
19164#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
19165#[inline]
19166#[target_feature(enable = "neon")]
19167#[cfg_attr(test, assert_instr(tbl))]
19168#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19169pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
19170    vqtbl2(a.0, a.1, b)
19171}
19172#[doc = "Table look-up"]
19173#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
19174#[inline]
19175#[target_feature(enable = "neon")]
19176#[cfg_attr(test, assert_instr(tbl))]
19177#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19178pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
19179    vqtbl2q(a.0, a.1, b)
19180}
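// Editorial usage sketch (added for illustration, not generated output):
// the two-register forms treat `a.0` and `a.1` as one 32-byte table, so
// valid indices are 0..=31 and anything larger yields 0. All constants and
// the `_example` helper are illustrative assumptions; lane order shown
// assumes a little-endian target.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vqtbl2q_u8_example() -> uint8x16_t {
    // Bytes 0..=31 spread across the two table registers.
    let t0: uint8x16_t =
        unsafe { transmute([0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    let t1: uint8x16_t =
        unsafe { transmute([16u8, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]) };
    // Index 31 reads the last byte of `t1`; index 32 is out of range -> 0.
    let idx: uint8x16_t =
        unsafe { transmute([31u8, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 32]) };
    vqtbl2q_u8(uint8x16x2_t(t0, t1), idx)
}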
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    vqtbl3q(a.0, a.1, a.2, b)
}
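// Editorial usage sketch (added for illustration, not generated output):
// the three-register forms concatenate the table registers into one 48-byte
// table, so valid indices are 0..=47 and larger values yield 0. The
// `_example` helper and all constants are illustrative assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vqtbl3q_s8_example() -> int8x16_t {
    // Fill the three table registers with recognizable values.
    let tables = int8x16x3_t(vdupq_n_s8(1), vdupq_n_s8(2), vdupq_n_s8(3));
    // Indices 0, 16 and 32 select bytes from the first, second and third
    // register respectively; index 48 is out of range and yields 0.
    let idx: uint8x16_t =
        unsafe { transmute([0u8, 16, 32, 48, 0, 16, 32, 48, 0, 16, 32, 48, 0, 16, 32, 48]) };
    vqtbl3q_s8(tables, idx)
}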
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
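// Editorial usage sketch (added for illustration, not generated output):
// the four-register forms look up a 64-byte table; indices 0..=63 select a
// byte and anything larger yields 0. The `_example` helper and constants
// are illustrative assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vqtbl4q_u8_example() -> uint8x16_t {
    let tables = uint8x16x4_t(
        vdupq_n_u8(10),
        vdupq_n_u8(20),
        vdupq_n_u8(30),
        vdupq_n_u8(40),
    );
    // Indices 0, 16, 32 and 48 hit the four registers in turn; 64 yields 0.
    let idx: uint8x16_t =
        unsafe { transmute([0u8, 16, 32, 48, 64, 0, 16, 32, 48, 64, 0, 16, 32, 48, 64, 0]) };
    vqtbl4q_u8(tables, idx)
}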
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    vqtbx1q(a, b, c)
}
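// Editorial usage sketch (added for illustration, not generated output):
// unlike the plain `vqtbl1q_u8` lookup, the extended form keeps the
// corresponding byte of `a` when an index is out of range (>= 16) instead
// of writing 0. The `_example` helper and all constants are illustrative
// assumptions; lane order shown assumes little-endian.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vqtbx1q_u8_example() -> uint8x16_t {
    let fallback: uint8x16_t = vdupq_n_u8(0xFF);
    let table: uint8x16_t =
        unsafe { transmute([0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    // Lane 0 reads table[5]; lane 1's index 200 is out of range, so that
    // lane keeps 0xFF from `fallback`.
    let idx: uint8x16_t =
        unsafe { transmute([5u8, 200, 0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14]) };
    vqtbx1q_u8(fallback, table, idx)
}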
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    vqtbx2q(a, b.0, b.1, c)
}
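// Editorial usage sketch (added for illustration, not generated output):
// `vqtbx2q_u8` looks indices up in the 32-byte two-register table and, for
// indices >= 32, keeps the corresponding byte of `a`. The `_example` helper
// and all constants are illustrative assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vqtbx2q_u8_example() -> uint8x16_t {
    let fallback: uint8x16_t = vdupq_n_u8(0xAA);
    let tables = uint8x16x2_t(vdupq_n_u8(1), vdupq_n_u8(2));
    // Indices 0 and 31 are in range; index 32 falls back to 0xAA.
    let idx: uint8x16_t =
        unsafe { transmute([0u8, 31, 32, 0, 31, 32, 0, 31, 32, 0, 31, 32, 0, 31, 32, 0]) };
    vqtbx2q_u8(fallback, tables, idx)
}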
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x2_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x2_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    vqtbx3q(a, b.0, b.1, b.2, c)
}
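// Editorial usage sketch (added for illustration, not generated output):
// `vqtbx3q_s8` is the three-register extended lookup; indices 0..=47 read
// the 48-byte table and larger indices keep the byte from `a`. The
// `_example` helper and all constants are illustrative assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
fn vqtbx3q_s8_example() -> int8x16_t {
    let fallback: int8x16_t = vdupq_n_s8(-1);
    let tables = int8x16x3_t(vdupq_n_s8(1), vdupq_n_s8(2), vdupq_n_s8(3));
    // 0, 16 and 32 hit the three registers; 48 is out of range -> -1.
    let idx: uint8x16_t =
        unsafe { transmute([0u8, 16, 32, 48, 0, 16, 32, 48, 0, 16, 32, 48, 0, 16, 32, 48]) };
    vqtbx3q_s8(fallback, tables, idx)
}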
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x3_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x3_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
20636#[doc = "Extended table look-up"]
20637#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
20638#[inline]
20639#[cfg(target_endian = "little")]
20640#[target_feature(enable = "neon")]
20641#[cfg_attr(test, assert_instr(tbx))]
20642#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20643pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
20644    unsafe {
20645        transmute(vqtbx4(
20646            transmute(a),
20647            transmute(b.0),
20648            transmute(b.1),
20649            transmute(b.2),
20650            transmute(b.3),
20651            c,
20652        ))
20653    }
20654}
20655#[doc = "Extended table look-up"]
20656#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
20657#[inline]
20658#[cfg(target_endian = "big")]
20659#[target_feature(enable = "neon")]
20660#[cfg_attr(test, assert_instr(tbx))]
20661#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20662pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
20663    let mut b: uint8x16x4_t = b;
20664    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
20665    b.0 = unsafe {
20666        simd_shuffle!(
20667            b.0,
20668            b.0,
20669            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20670        )
20671    };
20672    b.1 = unsafe {
20673        simd_shuffle!(
20674            b.1,
20675            b.1,
20676            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20677        )
20678    };
20679    b.2 = unsafe {
20680        simd_shuffle!(
20681            b.2,
20682            b.2,
20683            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20684        )
20685    };
20686    b.3 = unsafe {
20687        simd_shuffle!(
20688            b.3,
20689            b.3,
20690            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20691        )
20692    };
20693    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
20694    unsafe {
20695        let ret_val: uint8x8_t = transmute(vqtbx4(
20696            transmute(a),
20697            transmute(b.0),
20698            transmute(b.1),
20699            transmute(b.2),
20700            transmute(b.3),
20701            c,
20702        ));
20703        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20704    }
20705}
20706#[doc = "Extended table look-up"]
20707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
20708#[inline]
20709#[cfg(target_endian = "little")]
20710#[target_feature(enable = "neon")]
20711#[cfg_attr(test, assert_instr(tbx))]
20712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20713pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
20714    unsafe {
20715        transmute(vqtbx4q(
20716            transmute(a),
20717            transmute(b.0),
20718            transmute(b.1),
20719            transmute(b.2),
20720            transmute(b.3),
20721            c,
20722        ))
20723    }
20724}
20725#[doc = "Extended table look-up"]
20726#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
20727#[inline]
20728#[cfg(target_endian = "big")]
20729#[target_feature(enable = "neon")]
20730#[cfg_attr(test, assert_instr(tbx))]
20731#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20732pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
20733    let mut b: uint8x16x4_t = b;
20734    let a: uint8x16_t =
20735        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20736    b.0 = unsafe {
20737        simd_shuffle!(
20738            b.0,
20739            b.0,
20740            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20741        )
20742    };
20743    b.1 = unsafe {
20744        simd_shuffle!(
20745            b.1,
20746            b.1,
20747            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20748        )
20749    };
20750    b.2 = unsafe {
20751        simd_shuffle!(
20752            b.2,
20753            b.2,
20754            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20755        )
20756    };
20757    b.3 = unsafe {
20758        simd_shuffle!(
20759            b.3,
20760            b.3,
20761            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20762        )
20763    };
20764    let c: uint8x16_t =
20765        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20766    unsafe {
20767        let ret_val: uint8x16_t = transmute(vqtbx4q(
20768            transmute(a),
20769            transmute(b.0),
20770            transmute(b.1),
20771            transmute(b.2),
20772            transmute(b.3),
20773            c,
20774        ));
20775        simd_shuffle!(
20776            ret_val,
20777            ret_val,
20778            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20779        )
20780    }
20781}
20782#[doc = "Extended table look-up"]
20783#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
20784#[inline]
20785#[cfg(target_endian = "little")]
20786#[target_feature(enable = "neon")]
20787#[cfg_attr(test, assert_instr(tbx))]
20788#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20789pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
20790    unsafe {
20791        transmute(vqtbx4(
20792            transmute(a),
20793            transmute(b.0),
20794            transmute(b.1),
20795            transmute(b.2),
20796            transmute(b.3),
20797            c,
20798        ))
20799    }
20800}
20801#[doc = "Extended table look-up"]
20802#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
20803#[inline]
20804#[cfg(target_endian = "big")]
20805#[target_feature(enable = "neon")]
20806#[cfg_attr(test, assert_instr(tbx))]
20807#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20808pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
20809    let mut b: poly8x16x4_t = b;
20810    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
20811    b.0 = unsafe {
20812        simd_shuffle!(
20813            b.0,
20814            b.0,
20815            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20816        )
20817    };
20818    b.1 = unsafe {
20819        simd_shuffle!(
20820            b.1,
20821            b.1,
20822            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20823        )
20824    };
20825    b.2 = unsafe {
20826        simd_shuffle!(
20827            b.2,
20828            b.2,
20829            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20830        )
20831    };
20832    b.3 = unsafe {
20833        simd_shuffle!(
20834            b.3,
20835            b.3,
20836            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20837        )
20838    };
20839    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
20840    unsafe {
20841        let ret_val: poly8x8_t = transmute(vqtbx4(
20842            transmute(a),
20843            transmute(b.0),
20844            transmute(b.1),
20845            transmute(b.2),
20846            transmute(b.3),
20847            c,
20848        ));
20849        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
20850    }
20851}
20852#[doc = "Extended table look-up"]
20853#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
20854#[inline]
20855#[cfg(target_endian = "little")]
20856#[target_feature(enable = "neon")]
20857#[cfg_attr(test, assert_instr(tbx))]
20858#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20859pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
20860    unsafe {
20861        transmute(vqtbx4q(
20862            transmute(a),
20863            transmute(b.0),
20864            transmute(b.1),
20865            transmute(b.2),
20866            transmute(b.3),
20867            c,
20868        ))
20869    }
20870}
20871#[doc = "Extended table look-up"]
20872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
20873#[inline]
20874#[cfg(target_endian = "big")]
20875#[target_feature(enable = "neon")]
20876#[cfg_attr(test, assert_instr(tbx))]
20877#[stable(feature = "neon_intrinsics", since = "1.59.0")]
20878pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
20879    let mut b: poly8x16x4_t = b;
20880    let a: poly8x16_t =
20881        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20882    b.0 = unsafe {
20883        simd_shuffle!(
20884            b.0,
20885            b.0,
20886            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20887        )
20888    };
20889    b.1 = unsafe {
20890        simd_shuffle!(
20891            b.1,
20892            b.1,
20893            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20894        )
20895    };
20896    b.2 = unsafe {
20897        simd_shuffle!(
20898            b.2,
20899            b.2,
20900            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20901        )
20902    };
20903    b.3 = unsafe {
20904        simd_shuffle!(
20905            b.3,
20906            b.3,
20907            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20908        )
20909    };
20910    let c: uint8x16_t =
20911        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
20912    unsafe {
20913        let ret_val: poly8x16_t = transmute(vqtbx4q(
20914            transmute(a),
20915            transmute(b.0),
20916            transmute(b.1),
20917            transmute(b.2),
20918            transmute(b.3),
20919            c,
20920        ));
20921        simd_shuffle!(
20922            ret_val,
20923            ret_val,
20924            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
20925        )
20926    }
20927}
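// Editorial sketch (not generated code): TBX treats `b` as a 64-byte table
// and `c` as per-lane byte indices; lanes whose index is >= 64 keep the
// corresponding byte of `a`. `lookup_with_fallback` is a hypothetical helper
// name used only for illustration.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn lookup_with_fallback(
    table: uint8x16x4_t,
    idx: uint8x16_t,
    fallback: uint8x16_t,
) -> uint8x16_t {
    // In-range lanes are read from `table`; out-of-range lanes pass
    // `fallback` through unchanged.
    vqtbx4q_u8(fallback, table, idx)
}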
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vrax1q_u64(a, b) }
}
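// Editorial sketch (not generated code): per 64-bit lane, RAX1 computes
// `a ^ rotate_left(b, 1)`, the mixing step used in SHA-3's theta function.
// A scalar model of one lane:
#[cfg(test)]
#[allow(dead_code)]
fn rax1_lane_model(a: u64, b: u64) -> u64 {
    // Same result vrax1q_u64 produces independently in each lane.
    a ^ b.rotate_left(1)
}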
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    unsafe { simd_bitreverse(a) }
}
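// Editorial sketch (not generated code): RBIT reverses the bit order within
// each byte lane, e.g. 0b0000_0001 becomes 0b1000_0000. A scalar model of
// one lane:
#[cfg(test)]
#[allow(dead_code)]
fn rbit_lane_model(a: u8) -> u8 {
    // Same per-byte result vrbit_s8 produces across all eight lanes.
    a.reverse_bits()
}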
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpeh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpeh_f16(a) }
}
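// Editorial note: FRECPE returns only a low-precision seed for 1/x (roughly
// 8 bits of mantissa), so callers typically refine it with the FRECPS step
// intrinsics that follow. See the Newton-Raphson sketch after vrecpsh_f16
// below.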
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrecpsh_f16(a, b) }
}
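// Editorial sketch (not generated code): FRECPS computes `2.0 - a * b`, the
// Newton-Raphson correction term, so `x * vrecpss_f32(d, x)` refines an
// estimate `x` of `1.0 / d`. `recip_f32` is a hypothetical helper name.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn recip_f32(d: f32) -> f32 {
    // Start from the ~8-bit FRECPE seed ...
    let mut x = vrecpes_f32(d);
    // ... then apply two correction steps; each roughly doubles the number
    // of accurate mantissa bits.
    x *= vrecpss_f32(d, x);
    x *= vrecpss_f32(d, x);
    x
}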
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrecpxh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpxh_f16(a) }
}
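// Editorial note: FRECPX keeps the sign, complements the exponent, and
// zeroes the fraction, yielding a cheap power-of-two approximation of the
// reciprocal that is commonly used to pre-scale operands before a divide or
// square-root refinement.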
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
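// Editorial sketch (not generated code): the vreinterpret family is a pure
// bit-level cast (it assembles to nothing, hence `assert_instr(nop)`); only
// the type changes, never the 64 or 128 bits of payload. For example:
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn reinterpret_roundtrip(a: uint32x2_t) -> uint32x2_t {
    // Casting u32x2 -> f32x2 -> u32x2 returns the original bit pattern.
    vreinterpret_u32_f32(vreinterpret_f32_u32(a))
}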
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
21977pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
21978    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
21979    unsafe {
21980        let ret_val: poly8x16_t = transmute(a);
21981        simd_shuffle!(
21982            ret_val,
21983            ret_val,
21984            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
21985        )
21986    }
21987}
21988#[doc = "Vector reinterpret cast operation"]
21989#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
21990#[inline]
21991#[cfg(target_endian = "little")]
21992#[target_feature(enable = "neon")]
21993#[stable(feature = "neon_intrinsics", since = "1.59.0")]
21994#[cfg_attr(test, assert_instr(nop))]
21995pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
21996    unsafe { transmute(a) }
21997}
21998#[doc = "Vector reinterpret cast operation"]
21999#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
22000#[inline]
22001#[cfg(target_endian = "big")]
22002#[target_feature(enable = "neon")]
22003#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22004#[cfg_attr(test, assert_instr(nop))]
22005pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
22006    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22007    unsafe {
22008        let ret_val: poly16x8_t = transmute(a);
22009        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
22010    }
22011}
22012#[doc = "Vector reinterpret cast operation"]
22013#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
22014#[inline]
22015#[cfg(target_endian = "little")]
22016#[target_feature(enable = "neon")]
22017#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22018#[cfg_attr(test, assert_instr(nop))]
22019pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
22020    unsafe { transmute(a) }
22021}
22022#[doc = "Vector reinterpret cast operation"]
22023#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
22024#[inline]
22025#[cfg(target_endian = "big")]
22026#[target_feature(enable = "neon")]
22027#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22028#[cfg_attr(test, assert_instr(nop))]
22029pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
22030    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
22031    unsafe {
22032        let ret_val: poly64x2_t = transmute(a);
22033        simd_shuffle!(ret_val, ret_val, [1, 0])
22034    }
22035}
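// Editorial sketch (not generated code): reinterpret casts compose to the
// identity, so a round trip through any element type returns the original
// value. Illustrative only, assuming an AArch64 test target.
#[cfg(all(test, target_arch = "aarch64"))]
mod reinterpret_round_trip_sketch {
    use core::arch::aarch64::*;

    #[test]
    fn f64_u8_f64_is_identity() {
        unsafe {
            let a = vdupq_n_f64(-2.5);
            let back = vreinterpretq_f64_u8(vreinterpretq_u8_f64(a));
            assert_eq!(vgetq_lane_f64::<0>(back), -2.5);
            assert_eq!(vgetq_lane_f64::<1>(back), -2.5);
        }
    }
}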
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
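// Editorial sketch (not generated code): reinterpreting between 64-bit
// element types (s64, u64, p64, f64) is a pure bit copy per lane, so the
// all-ones pattern of -1i64 becomes the all-ones polynomial. Illustrative
// only, assuming an AArch64 test target.
#[cfg(all(test, target_arch = "aarch64"))]
mod s64_to_p64_sketch {
    use core::arch::aarch64::*;

    #[test]
    fn sign_bits_are_preserved() {
        unsafe {
            let a = vdupq_n_s64(-1);
            let p = vreinterpretq_p64_s64(a);
            assert_eq!(vgetq_lane_p64::<0>(p), u64::MAX);
            assert_eq!(vgetq_lane_p64::<1>(p), u64::MAX);
        }
    }
}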
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    unsafe { transmute(a) }
}
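// Editorial sketch (not generated code): single-lane 64-bit vectors need no
// lane reversal on any endianness, so the `vreinterpret_*_p64` casts above
// have no endian-specific variants. A round trip, illustrative only:
#[cfg(all(test, target_arch = "aarch64"))]
mod p64_single_lane_sketch {
    use core::arch::aarch64::*;

    #[test]
    fn u64_p64_u64_round_trip() {
        unsafe {
            let a = vdup_n_u64(0xDEAD_BEEF_F00D_CAFE);
            let back = vreinterpret_u64_p64(vreinterpret_p64_u64(a));
            assert_eq!(vget_lane_u64::<0>(back), 0xDEAD_BEEF_F00D_CAFE);
        }
    }
}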
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
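// Editorial sketch (not generated code): with the FPCR in its default
// round-to-nearest, ties-to-even mode, FRINT32X rounds in-range values like
// an ordinary nearest rounding; see Arm's FRINT32X documentation for the
// out-of-range saturation behavior. Illustrative only, and it assumes the
// `frintts` feature was enabled at compile time.
#[cfg(all(test, target_arch = "aarch64", target_feature = "frintts"))]
mod frint32x_sketch {
    use core::arch::aarch64::*;

    #[test]
    fn rounds_with_current_mode() {
        unsafe {
            let a = vsetq_lane_f32::<1>(-3.5, vdupq_n_f32(2.5));
            let r = vrnd32xq_f32(a);
            // Ties-to-even: 2.5 -> 2.0 and -3.5 -> -4.0.
            assert_eq!(vgetq_lane_f32::<0>(r), 2.0);
            assert_eq!(vgetq_lane_f32::<1>(r), -4.0);
        }
    }
}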
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
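// Editorial sketch (not generated code): unlike the "x" variants above, the
// "z" variants always round toward zero, independent of the FPCR rounding
// mode. Illustrative only, assuming `frintts` was enabled at compile time.
#[cfg(all(test, target_arch = "aarch64", target_feature = "frintts"))]
mod frint64z_sketch {
    use core::arch::aarch64::*;

    #[test]
    fn truncates_toward_zero() {
        unsafe {
            let r = vrnd64zq_f64(vdupq_n_f64(-2.9));
            // Toward zero: -2.9 -> -2.0, regardless of the rounding mode.
            assert_eq!(vgetq_lane_f64::<0>(r), -2.0);
        }
    }
}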
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_trunc(a) }
}
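// Editorial sketch (not generated code): `vrnd*` (FRINTZ) simply drops the
// fractional part, i.e. it rounds toward zero in both directions.
// Illustrative only, assuming an AArch64 test target.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrnd_trunc_sketch {
    use core::arch::aarch64::*;

    #[test]
    fn drops_the_fraction() {
        unsafe {
            let a = vsetq_lane_f32::<1>(-1.7, vdupq_n_f32(1.7));
            let r = vrndq_f32(a);
            assert_eq!(vgetq_lane_f32::<0>(r), 1.0);
            assert_eq!(vgetq_lane_f32::<1>(r), -1.0);
        }
    }
}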
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_round(a) }
}
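// Editorial sketch (not generated code): `vrnda*` (FRINTA) breaks ties away
// from zero, which distinguishes it from the ties-to-even behavior of the
// default FPCR mode. Illustrative only, assuming an AArch64 test target.
#[cfg(all(test, target_arch = "aarch64"))]
mod vrnda_ties_away_sketch {
    use core::arch::aarch64::*;

    #[test]
    fn half_rounds_away_from_zero() {
        unsafe {
            let a = vsetq_lane_f32::<1>(-0.5, vdupq_n_f32(0.5));
            let r = vrndaq_f32(a);
            // Ties away from zero: 0.5 -> 1.0 and -0.5 -> -1.0.
            assert_eq!(vgetq_lane_f32::<0>(r), 1.0);
            assert_eq!(vgetq_lane_f32::<1>(r), -1.0);
        }
    }
}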
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    unsafe { roundf16(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndh_f16(a: f16) -> f16 {
    unsafe { truncf16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndiq_f64(a) }
}
23155#[doc = "Floating-point round to integral, using current rounding mode"]
23156#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
23157#[inline]
23158#[target_feature(enable = "neon,fp16")]
23159#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
23160#[cfg_attr(test, assert_instr(frinti))]
23161pub fn vrndih_f16(a: f16) -> f16 {
23162    unsafe extern "unadjusted" {
23163        #[cfg_attr(
23164            any(target_arch = "aarch64", target_arch = "arm64ec"),
23165            link_name = "llvm.nearbyint.f16"
23166        )]
23167        fn _vrndih_f16(a: f16) -> f16;
23168    }
23169    unsafe { _vrndih_f16(a) }
23170}
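// Illustrative sketch (not part of the generated bindings): the `vrnd*`
// families above map to distinct AArch64 FRINT* behaviours. `vrnda*` (FRINTA)
// rounds to nearest with ties away from zero, while `vrndi*` (FRINTI, lowered
// via `llvm.nearbyint`) follows whatever rounding mode is currently set in
// FPCR. The check below uses only the mode-independent ties-away variant and
// assumes a target where `neon` is statically enabled (the aarch64 default).
#[cfg(test)]
mod vrnda_examples {
    use super::*;
    #[test]
    fn ties_round_away_from_zero() {
        // 2.5 is exactly halfway between 2.0 and 3.0; FRINTA picks 3.0.
        let rounded = vrnda_f64(vdup_n_f64(2.5));
        assert_eq!(vget_lane_f64::<0>(rounded), 3.0);
    }
}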
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    unsafe { floorf16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndnq_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f16"
        )]
        fn _vrndnh_f16(a: f16) -> f16;
    }
    unsafe { _vrndnh_f16(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    unsafe { _vrndns_f32(a) }
}
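// Illustrative sketch (not part of the generated bindings): FRINTN rounds
// ties to the even neighbour, which is where it differs from FRINTA's
// ties-away rule. Assumes `neon` is statically enabled (the aarch64 default).
#[cfg(test)]
mod vrndn_examples {
    use super::*;
    #[test]
    fn ties_go_to_even() {
        assert_eq!(vrndns_f32(2.5), 2.0); // even neighbour below
        assert_eq!(vrndns_f32(3.5), 4.0); // even neighbour above
    }
}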
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    unsafe { ceilf16(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxh_f16(a: f16) -> f16 {
    round_ties_even_f16(a)
}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    vrshld_u64(a, -N as i64)
}
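// Illustrative sketch (not part of the generated bindings): a rounding shift
// right by N is expressed above as a rounding shift left by -N, which adds
// the rounding constant 1 << (N - 1) before shifting. Assumes `neon` is
// statically enabled (the aarch64 default).
#[cfg(test)]
mod vrshrd_examples {
    use super::*;
    #[test]
    fn rounds_instead_of_truncating() {
        // (7 + 2) >> 2 == 2, whereas a plain arithmetic shift gives 7 >> 2 == 1.
        assert_eq!(vrshrd_n_s64::<2>(7), 2);
        assert_eq!(vrshrd_n_u64::<2>(7), 2);
    }
}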
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
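// Illustrative sketch (not part of the generated bindings): the `_high`
// narrowing variants pass `a` through as the low half of the result and place
// the narrowed `b` in the high half, mirroring how RSHRN2 writes the upper
// vector half. Assumes `neon` is statically enabled (the aarch64 default).
#[cfg(test)]
mod vrshrn_high_examples {
    use super::*;
    #[test]
    fn low_half_is_passed_through() {
        let a = vdup_n_s16(1);
        let b = vdupq_n_s32(7);
        let r = vrshrn_high_n_s32::<2>(a, b);
        // Lanes 0..4 come from `a`; lanes 4..8 hold (7 + 2) >> 2 == 2.
        assert_eq!(vgetq_lane_s16::<0>(r), 1);
        assert_eq!(vgetq_lane_s16::<7>(r), 2);
    }
}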
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
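// Illustrative sketch (not part of the generated bindings): FRSQRTS computes
// (3 - a*b) / 2, the Newton-Raphson correction factor for 1/sqrt(x). Each
// estimate/step pair roughly doubles the precision of the initial FRSQRTE
// estimate. Assumes `neon` is statically enabled (the aarch64 default).
#[cfg(test)]
mod vrsqrt_refinement_examples {
    use super::*;
    #[test]
    fn two_steps_refine_the_estimate() {
        let x: f32 = 2.0;
        let mut y = vrsqrtes_f32(x); // low-precision estimate of 1/sqrt(2)
        // y' = y * (3 - x*y*y) / 2, written via the step intrinsic.
        y = y * vrsqrtss_f32(x * y, y);
        y = y * vrsqrtss_f32(x * y, y);
        assert!((y - 0.70710678).abs() < 1e-5);
    }
}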
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
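// Note (an assumption based on the differing `assert_instr` markers): the
// little-endian and big-endian definitions of `vrsubhn_high_*` above are
// functionally identical; they are presumably duplicated so the test harness
// can expect the instruction selection each byte order actually produces
// (`rsubhn2` on little-endian, a plain `rsubhn` on big-endian).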
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
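// Illustrative sketch (not part of the generated bindings): `vsetq_lane_f64`
// overwrites only the selected lane and leaves the rest of the vector
// untouched. Assumes `neon` is statically enabled (the aarch64 default).
#[cfg(test)]
mod vset_lane_examples {
    use super::*;
    #[test]
    fn only_the_selected_lane_changes() {
        let v = vdupq_n_f64(0.0);
        let r = vsetq_lane_f64::<1>(5.0, v);
        assert_eq!(vgetq_lane_f64::<0>(r), 0.0);
        assert_eq!(vgetq_lane_f64::<1>(r), 5.0);
    }
}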
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
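// Usage sketch (not generated; the operand wiring here is schematic only): a
// SHA-512 compression round typically extends the message schedule with
// `vsha512su0q_u64` followed by `vsha512su1q_u64`, then folds the schedule
// words into the working state with `vsha512hq_u64` and `vsha512h2q_u64`.
// Callers are expected to gate this path on runtime detection of the `sha3`
// feature, e.g. via `std::arch::is_aarch64_feature_detected!("sha3")`.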
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
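// Illustrative sketch (not part of the generated bindings): per the SSHL/USHL
// semantics, a positive `b` shifts left and a negative `b` shifts right,
// truncating, unlike the rounding `vrshld_*` variants earlier in this file.
// Assumes `neon` is statically enabled (the aarch64 default).
#[cfg(test)]
mod vshld_examples {
    use super::*;
    #[test]
    fn sign_of_shift_selects_direction() {
        assert_eq!(vshld_s64(1, 3), 8); // left shift by 3
        assert_eq!(vshld_s64(-16, -2), -4); // arithmetic right shift by 2
    }
}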
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
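// Illustrative sketch (not part of the generated bindings): the `_high`
// widening shifts read only the upper half of the 128-bit input, widen each
// element, then shift left by N. Assumes `neon` is statically enabled (the
// aarch64 default).
#[cfg(test)]
mod vshll_high_examples {
    use super::*;
    #[test]
    fn widens_the_upper_half_then_shifts() {
        let a = vdupq_n_s8(3);
        let r = vshll_high_n_s8::<1>(a);
        // Every upper-half lane is widened to i16 and doubled: 3 << 1 == 6.
        assert_eq!(vgetq_lane_s16::<0>(r), 6);
    }
}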
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
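// Illustrative sketch (not part of the generated bindings): `vshrn_high_n_*`
// truncates when narrowing, while the `vrshrn_high_n_*` family earlier in
// this file rounds. Assumes `neon` is statically enabled (the aarch64
// default).
#[cfg(test)]
mod vshrn_high_examples {
    use super::*;
    #[test]
    fn truncates_where_rshrn_rounds() {
        let a = vdup_n_s16(0);
        let b = vdupq_n_s32(7);
        // 7 >> 2 == 1 (truncating) versus (7 + 2) >> 2 == 2 (rounding).
        assert_eq!(vgetq_lane_s16::<7>(vshrn_high_n_s32::<2>(a, b)), 1);
        assert_eq!(vgetq_lane_s16::<7>(vrshrn_high_n_s32::<2>(a, b)), 2);
    }
}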
24138#[doc = "Shift Left and Insert (immediate)"]
24139#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
24140#[inline]
24141#[target_feature(enable = "neon")]
24142#[cfg_attr(test, assert_instr(sli, N = 1))]
24143#[rustc_legacy_const_generics(2)]
24144#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24145pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
24146    static_assert_uimm_bits!(N, 3);
24147    unsafe extern "unadjusted" {
24148        #[cfg_attr(
24149            any(target_arch = "aarch64", target_arch = "arm64ec"),
24150            link_name = "llvm.aarch64.neon.vsli.v8i8"
24151        )]
24152        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
24153    }
24154    unsafe { _vsli_n_s8(a, b, N) }
24155}
24156#[doc = "Shift Left and Insert (immediate)"]
24157#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
24158#[inline]
24159#[target_feature(enable = "neon")]
24160#[cfg_attr(test, assert_instr(sli, N = 1))]
24161#[rustc_legacy_const_generics(2)]
24162#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24163pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
24164    static_assert_uimm_bits!(N, 3);
24165    unsafe extern "unadjusted" {
24166        #[cfg_attr(
24167            any(target_arch = "aarch64", target_arch = "arm64ec"),
24168            link_name = "llvm.aarch64.neon.vsli.v16i8"
24169        )]
24170        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
24171    }
24172    unsafe { _vsliq_n_s8(a, b, N) }
24173}
24174#[doc = "Shift Left and Insert (immediate)"]
24175#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
24176#[inline]
24177#[target_feature(enable = "neon")]
24178#[cfg_attr(test, assert_instr(sli, N = 1))]
24179#[rustc_legacy_const_generics(2)]
24180#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24181pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
24182    static_assert_uimm_bits!(N, 4);
24183    unsafe extern "unadjusted" {
24184        #[cfg_attr(
24185            any(target_arch = "aarch64", target_arch = "arm64ec"),
24186            link_name = "llvm.aarch64.neon.vsli.v4i16"
24187        )]
24188        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
24189    }
24190    unsafe { _vsli_n_s16(a, b, N) }
24191}
24192#[doc = "Shift Left and Insert (immediate)"]
24193#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
24194#[inline]
24195#[target_feature(enable = "neon")]
24196#[cfg_attr(test, assert_instr(sli, N = 1))]
24197#[rustc_legacy_const_generics(2)]
24198#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24199pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
24200    static_assert_uimm_bits!(N, 4);
24201    unsafe extern "unadjusted" {
24202        #[cfg_attr(
24203            any(target_arch = "aarch64", target_arch = "arm64ec"),
24204            link_name = "llvm.aarch64.neon.vsli.v8i16"
24205        )]
24206        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
24207    }
24208    unsafe { _vsliq_n_s16(a, b, N) }
24209}
24210#[doc = "Shift Left and Insert (immediate)"]
24211#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
24212#[inline]
24213#[target_feature(enable = "neon")]
24214#[cfg_attr(test, assert_instr(sli, N = 1))]
24215#[rustc_legacy_const_generics(2)]
24216#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24217pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
24218    static_assert!(N >= 0 && N <= 31);
24219    unsafe extern "unadjusted" {
24220        #[cfg_attr(
24221            any(target_arch = "aarch64", target_arch = "arm64ec"),
24222            link_name = "llvm.aarch64.neon.vsli.v2i32"
24223        )]
24224        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
24225    }
24226    unsafe { _vsli_n_s32(a, b, N) }
24227}
24228#[doc = "Shift Left and Insert (immediate)"]
24229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
24230#[inline]
24231#[target_feature(enable = "neon")]
24232#[cfg_attr(test, assert_instr(sli, N = 1))]
24233#[rustc_legacy_const_generics(2)]
24234#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24235pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
24236    static_assert!(N >= 0 && N <= 31);
24237    unsafe extern "unadjusted" {
24238        #[cfg_attr(
24239            any(target_arch = "aarch64", target_arch = "arm64ec"),
24240            link_name = "llvm.aarch64.neon.vsli.v4i32"
24241        )]
24242        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
24243    }
24244    unsafe { _vsliq_n_s32(a, b, N) }
24245}
24246#[doc = "Shift Left and Insert (immediate)"]
24247#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
24248#[inline]
24249#[target_feature(enable = "neon")]
24250#[cfg_attr(test, assert_instr(sli, N = 1))]
24251#[rustc_legacy_const_generics(2)]
24252#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24253pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
24254    static_assert!(N >= 0 && N <= 63);
24255    unsafe extern "unadjusted" {
24256        #[cfg_attr(
24257            any(target_arch = "aarch64", target_arch = "arm64ec"),
24258            link_name = "llvm.aarch64.neon.vsli.v1i64"
24259        )]
24260        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
24261    }
24262    unsafe { _vsli_n_s64(a, b, N) }
24263}
24264#[doc = "Shift Left and Insert (immediate)"]
24265#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
24266#[inline]
24267#[target_feature(enable = "neon")]
24268#[cfg_attr(test, assert_instr(sli, N = 1))]
24269#[rustc_legacy_const_generics(2)]
24270#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24271pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
24272    static_assert!(N >= 0 && N <= 63);
24273    unsafe extern "unadjusted" {
24274        #[cfg_attr(
24275            any(target_arch = "aarch64", target_arch = "arm64ec"),
24276            link_name = "llvm.aarch64.neon.vsli.v2i64"
24277        )]
24278        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
24279    }
24280    unsafe { _vsliq_n_s64(a, b, N) }
24281}
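// A minimal usage sketch (hypothetical test helper, not generated content):
// for each lane, `vsliq_n_s32::<N>(a, b)` yields `b << N` with the low N bits
// taken from `a`, i.e. the insertion preserves `a`'s low bits. Assumes the
// `neon` target feature is available.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vsliq_n_s32_semantics() {
    let a = vdupq_n_s32(0b0001); // bit 0 set in every lane
    let b = vdupq_n_s32(0b0001); // shifted left by 1 before insertion
    let r = vsliq_n_s32::<1>(a, b);
    // Each lane is 0b0011: bit 0 kept from `a`, bit 1 inserted from `b << 1`.
    assert_eq!(vgetq_lane_s32::<0>(r), 0b0011);
}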
24282#[doc = "Shift Left and Insert (immediate)"]
24283#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
24284#[inline]
24285#[target_feature(enable = "neon")]
24286#[cfg_attr(test, assert_instr(sli, N = 1))]
24287#[rustc_legacy_const_generics(2)]
24288#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24289pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
24290    static_assert_uimm_bits!(N, 3);
24291    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
24292}
24293#[doc = "Shift Left and Insert (immediate)"]
24294#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
24295#[inline]
24296#[target_feature(enable = "neon")]
24297#[cfg_attr(test, assert_instr(sli, N = 1))]
24298#[rustc_legacy_const_generics(2)]
24299#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24300pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
24301    static_assert_uimm_bits!(N, 3);
24302    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
24303}
24304#[doc = "Shift Left and Insert (immediate)"]
24305#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
24306#[inline]
24307#[target_feature(enable = "neon")]
24308#[cfg_attr(test, assert_instr(sli, N = 1))]
24309#[rustc_legacy_const_generics(2)]
24310#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24311pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
24312    static_assert_uimm_bits!(N, 4);
24313    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
24314}
24315#[doc = "Shift Left and Insert (immediate)"]
24316#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
24317#[inline]
24318#[target_feature(enable = "neon")]
24319#[cfg_attr(test, assert_instr(sli, N = 1))]
24320#[rustc_legacy_const_generics(2)]
24321#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24322pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
24323    static_assert_uimm_bits!(N, 4);
24324    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
24325}
24326#[doc = "Shift Left and Insert (immediate)"]
24327#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
24328#[inline]
24329#[target_feature(enable = "neon")]
24330#[cfg_attr(test, assert_instr(sli, N = 1))]
24331#[rustc_legacy_const_generics(2)]
24332#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24333pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
24334    static_assert!(N >= 0 && N <= 31);
24335    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
24336}
24337#[doc = "Shift Left and Insert (immediate)"]
24338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
24339#[inline]
24340#[target_feature(enable = "neon")]
24341#[cfg_attr(test, assert_instr(sli, N = 1))]
24342#[rustc_legacy_const_generics(2)]
24343#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24344pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
24345    static_assert!(N >= 0 && N <= 31);
24346    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
24347}
24348#[doc = "Shift Left and Insert (immediate)"]
24349#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
24350#[inline]
24351#[target_feature(enable = "neon")]
24352#[cfg_attr(test, assert_instr(sli, N = 1))]
24353#[rustc_legacy_const_generics(2)]
24354#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24355pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
24356    static_assert!(N >= 0 && N <= 63);
24357    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
24358}
24359#[doc = "Shift Left and Insert (immediate)"]
24360#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
24361#[inline]
24362#[target_feature(enable = "neon")]
24363#[cfg_attr(test, assert_instr(sli, N = 1))]
24364#[rustc_legacy_const_generics(2)]
24365#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24366pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
24367    static_assert!(N >= 0 && N <= 63);
24368    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
24369}
24370#[doc = "Shift Left and Insert (immediate)"]
24371#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
24372#[inline]
24373#[target_feature(enable = "neon")]
24374#[cfg_attr(test, assert_instr(sli, N = 1))]
24375#[rustc_legacy_const_generics(2)]
24376#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24377pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
24378    static_assert_uimm_bits!(N, 3);
24379    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
24380}
24381#[doc = "Shift Left and Insert (immediate)"]
24382#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
24383#[inline]
24384#[target_feature(enable = "neon")]
24385#[cfg_attr(test, assert_instr(sli, N = 1))]
24386#[rustc_legacy_const_generics(2)]
24387#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24388pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
24389    static_assert_uimm_bits!(N, 3);
24390    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
24391}
24392#[doc = "Shift Left and Insert (immediate)"]
24393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
24394#[inline]
24395#[target_feature(enable = "neon")]
24396#[cfg_attr(test, assert_instr(sli, N = 1))]
24397#[rustc_legacy_const_generics(2)]
24398#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24399pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
24400    static_assert_uimm_bits!(N, 4);
24401    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
24402}
24403#[doc = "Shift Left and Insert (immediate)"]
24404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
24405#[inline]
24406#[target_feature(enable = "neon")]
24407#[cfg_attr(test, assert_instr(sli, N = 1))]
24408#[rustc_legacy_const_generics(2)]
24409#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24410pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
24411    static_assert_uimm_bits!(N, 4);
24412    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
24413}
24414#[doc = "Shift Left and Insert (immediate)"]
24415#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
24416#[inline]
24417#[target_feature(enable = "neon,aes")]
24418#[cfg_attr(test, assert_instr(sli, N = 1))]
24419#[rustc_legacy_const_generics(2)]
24420#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24421pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
24422    static_assert!(N >= 0 && N <= 63);
24423    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
24424}
24425#[doc = "Shift Left and Insert (immediate)"]
24426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
24427#[inline]
24428#[target_feature(enable = "neon,aes")]
24429#[cfg_attr(test, assert_instr(sli, N = 1))]
24430#[rustc_legacy_const_generics(2)]
24431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24432pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
24433    static_assert!(N >= 0 && N <= 63);
24434    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
24435}
24436#[doc = "Shift left and insert"]
24437#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
24438#[inline]
24439#[target_feature(enable = "neon")]
24440#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24441#[rustc_legacy_const_generics(2)]
24442#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
24443pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
24444    static_assert!(N >= 0 && N <= 63);
24445    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
24446}
24447#[doc = "Shift left and insert"]
24448#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
24449#[inline]
24450#[target_feature(enable = "neon")]
24451#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24452#[rustc_legacy_const_generics(2)]
24453#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
24454pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
24455    static_assert!(N >= 0 && N <= 63);
24456    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
24457}
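// A minimal sketch of the scalar form (hypothetical test helper):
// `vslid_n_s64::<N>(a, b)` computes `(b << N) | (a & ((1 << N) - 1))` on a
// single 64-bit value. Assumes the `neon` target feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vslid_n_s64() {
    // (0b0001 << 2) | (0b0011 & 0b0011) == 0b0111
    assert_eq!(vslid_n_s64::<2>(0b0011, 0b0001), 0b0111);
}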
24458#[doc = "SM3PARTW1"]
24459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
24460#[inline]
24461#[target_feature(enable = "neon,sm4")]
24462#[cfg_attr(test, assert_instr(sm3partw1))]
24463#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24464pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24465    unsafe extern "unadjusted" {
24466        #[cfg_attr(
24467            any(target_arch = "aarch64", target_arch = "arm64ec"),
24468            link_name = "llvm.aarch64.crypto.sm3partw1"
24469        )]
24470        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
24471    }
24472    unsafe { _vsm3partw1q_u32(a, b, c) }
24473}
24474#[doc = "SM3PARTW2"]
24475#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
24476#[inline]
24477#[target_feature(enable = "neon,sm4")]
24478#[cfg_attr(test, assert_instr(sm3partw2))]
24479#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24480pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24481    unsafe extern "unadjusted" {
24482        #[cfg_attr(
24483            any(target_arch = "aarch64", target_arch = "arm64ec"),
24484            link_name = "llvm.aarch64.crypto.sm3partw2"
24485        )]
24486        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
24487    }
24488    unsafe { _vsm3partw2q_u32(a, b, c) }
24489}
24490#[doc = "SM3SS1"]
24491#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
24492#[inline]
24493#[target_feature(enable = "neon,sm4")]
24494#[cfg_attr(test, assert_instr(sm3ss1))]
24495#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24496pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24497    unsafe extern "unadjusted" {
24498        #[cfg_attr(
24499            any(target_arch = "aarch64", target_arch = "arm64ec"),
24500            link_name = "llvm.aarch64.crypto.sm3ss1"
24501        )]
24502        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
24503    }
24504    unsafe { _vsm3ss1q_u32(a, b, c) }
24505}
24506#[doc = "SM3TT1A"]
24507#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
24508#[inline]
24509#[target_feature(enable = "neon,sm4")]
24510#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
24511#[rustc_legacy_const_generics(3)]
24512#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24513pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24514    static_assert_uimm_bits!(IMM2, 2);
24515    unsafe extern "unadjusted" {
24516        #[cfg_attr(
24517            any(target_arch = "aarch64", target_arch = "arm64ec"),
24518            link_name = "llvm.aarch64.crypto.sm3tt1a"
24519        )]
24520        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
24521    }
24522    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
24523}
24524#[doc = "SM3TT1B"]
24525#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
24526#[inline]
24527#[target_feature(enable = "neon,sm4")]
24528#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
24529#[rustc_legacy_const_generics(3)]
24530#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24531pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24532    static_assert_uimm_bits!(IMM2, 2);
24533    unsafe extern "unadjusted" {
24534        #[cfg_attr(
24535            any(target_arch = "aarch64", target_arch = "arm64ec"),
24536            link_name = "llvm.aarch64.crypto.sm3tt1b"
24537        )]
24538        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
24539    }
24540    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
24541}
24542#[doc = "SM3TT2A"]
24543#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
24544#[inline]
24545#[target_feature(enable = "neon,sm4")]
24546#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
24547#[rustc_legacy_const_generics(3)]
24548#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24549pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24550    static_assert_uimm_bits!(IMM2, 2);
24551    unsafe extern "unadjusted" {
24552        #[cfg_attr(
24553            any(target_arch = "aarch64", target_arch = "arm64ec"),
24554            link_name = "llvm.aarch64.crypto.sm3tt2a"
24555        )]
24556        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
24557    }
24558    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
24559}
24560#[doc = "SM3TT2B"]
24561#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
24562#[inline]
24563#[target_feature(enable = "neon,sm4")]
24564#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
24565#[rustc_legacy_const_generics(3)]
24566#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24567pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
24568    static_assert_uimm_bits!(IMM2, 2);
24569    unsafe extern "unadjusted" {
24570        #[cfg_attr(
24571            any(target_arch = "aarch64", target_arch = "arm64ec"),
24572            link_name = "llvm.aarch64.crypto.sm3tt2b"
24573        )]
24574        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
24575    }
24576    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
24577}
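// A shape-only sketch (hypothetical test helper) showing the SM3 helpers
// above being chained; the zero vectors and argument roles are placeholders,
// not a faithful SM3 compression round. Assumes the unstable `sm4` feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,sm4")]
fn example_sm3_chain_sketch() {
    let x = vdupq_n_u32(0); // placeholder state words
    let y = vdupq_n_u32(0); // placeholder state words
    let w = vdupq_n_u32(0); // placeholder message-schedule words
    let ss1 = vsm3ss1q_u32(x, y, w); // SS1 intermediate value
    let _tt1 = vsm3tt1aq_u32::<0>(x, ss1, w); // TT1, A-variant, word index 0
    let _tt2 = vsm3tt2aq_u32::<0>(y, ss1, w); // TT2, A-variant, word index 0
}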
24578#[doc = "SM4 key"]
24579#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
24580#[inline]
24581#[target_feature(enable = "neon,sm4")]
24582#[cfg_attr(test, assert_instr(sm4ekey))]
24583#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24584pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
24585    unsafe extern "unadjusted" {
24586        #[cfg_attr(
24587            any(target_arch = "aarch64", target_arch = "arm64ec"),
24588            link_name = "llvm.aarch64.crypto.sm4ekey"
24589        )]
24590        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
24591    }
24592    unsafe { _vsm4ekeyq_u32(a, b) }
24593}
24594#[doc = "SM4 encode"]
24595#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
24596#[inline]
24597#[target_feature(enable = "neon,sm4")]
24598#[cfg_attr(test, assert_instr(sm4e))]
24599#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
24600pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
24601    unsafe extern "unadjusted" {
24602        #[cfg_attr(
24603            any(target_arch = "aarch64", target_arch = "arm64ec"),
24604            link_name = "llvm.aarch64.crypto.sm4e"
24605        )]
24606        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
24607    }
24608    unsafe { _vsm4eq_u32(a, b) }
24609}
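// A minimal sketch (hypothetical test helper) chaining the two SM4 helpers:
// `vsm4ekeyq_u32` derives four round keys, and `vsm4eq_u32` then runs four
// encryption rounds with them. The zero vectors are placeholders for the real
// key words, CK round constants, and cipher state. Assumes the `sm4` feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,sm4")]
fn example_sm4_rounds_sketch() {
    let key = vdupq_n_u32(0); // placeholder key words
    let ck = vdupq_n_u32(0); // placeholder CK round constants
    let rk = vsm4ekeyq_u32(key, ck); // four derived round keys
    let state = vdupq_n_u32(0); // placeholder cipher state
    let _next = vsm4eq_u32(state, rk); // four SM4 rounds applied to the state
}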
24610#[doc = "Unsigned saturating Accumulate of Signed value."]
24611#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
24612#[inline]
24613#[target_feature(enable = "neon")]
24614#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24615#[cfg_attr(test, assert_instr(usqadd))]
24616pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
24617    unsafe extern "unadjusted" {
24618        #[cfg_attr(
24619            any(target_arch = "aarch64", target_arch = "arm64ec"),
24620            link_name = "llvm.aarch64.neon.usqadd.v8i8"
24621        )]
24622        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
24623    }
24624    unsafe { _vsqadd_u8(a, b) }
24625}
24626#[doc = "Unsigned saturating Accumulate of Signed value."]
24627#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
24628#[inline]
24629#[target_feature(enable = "neon")]
24630#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24631#[cfg_attr(test, assert_instr(usqadd))]
24632pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
24633    unsafe extern "unadjusted" {
24634        #[cfg_attr(
24635            any(target_arch = "aarch64", target_arch = "arm64ec"),
24636            link_name = "llvm.aarch64.neon.usqadd.v16i8"
24637        )]
24638        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
24639    }
24640    unsafe { _vsqaddq_u8(a, b) }
24641}
24642#[doc = "Unsigned saturating Accumulate of Signed value."]
24643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
24644#[inline]
24645#[target_feature(enable = "neon")]
24646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24647#[cfg_attr(test, assert_instr(usqadd))]
24648pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
24649    unsafe extern "unadjusted" {
24650        #[cfg_attr(
24651            any(target_arch = "aarch64", target_arch = "arm64ec"),
24652            link_name = "llvm.aarch64.neon.usqadd.v4i16"
24653        )]
24654        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
24655    }
24656    unsafe { _vsqadd_u16(a, b) }
24657}
24658#[doc = "Unsigned saturating Accumulate of Signed value."]
24659#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
24660#[inline]
24661#[target_feature(enable = "neon")]
24662#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24663#[cfg_attr(test, assert_instr(usqadd))]
24664pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
24665    unsafe extern "unadjusted" {
24666        #[cfg_attr(
24667            any(target_arch = "aarch64", target_arch = "arm64ec"),
24668            link_name = "llvm.aarch64.neon.usqadd.v8i16"
24669        )]
24670        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
24671    }
24672    unsafe { _vsqaddq_u16(a, b) }
24673}
24674#[doc = "Unsigned saturating Accumulate of Signed value."]
24675#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
24676#[inline]
24677#[target_feature(enable = "neon")]
24678#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24679#[cfg_attr(test, assert_instr(usqadd))]
24680pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
24681    unsafe extern "unadjusted" {
24682        #[cfg_attr(
24683            any(target_arch = "aarch64", target_arch = "arm64ec"),
24684            link_name = "llvm.aarch64.neon.usqadd.v2i32"
24685        )]
24686        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
24687    }
24688    unsafe { _vsqadd_u32(a, b) }
24689}
24690#[doc = "Unsigned saturating Accumulate of Signed value."]
24691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
24692#[inline]
24693#[target_feature(enable = "neon")]
24694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24695#[cfg_attr(test, assert_instr(usqadd))]
24696pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
24697    unsafe extern "unadjusted" {
24698        #[cfg_attr(
24699            any(target_arch = "aarch64", target_arch = "arm64ec"),
24700            link_name = "llvm.aarch64.neon.usqadd.v4i32"
24701        )]
24702        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
24703    }
24704    unsafe { _vsqaddq_u32(a, b) }
24705}
24706#[doc = "Unsigned saturating Accumulate of Signed value."]
24707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
24708#[inline]
24709#[target_feature(enable = "neon")]
24710#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24711#[cfg_attr(test, assert_instr(usqadd))]
24712pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
24713    unsafe extern "unadjusted" {
24714        #[cfg_attr(
24715            any(target_arch = "aarch64", target_arch = "arm64ec"),
24716            link_name = "llvm.aarch64.neon.usqadd.v1i64"
24717        )]
24718        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
24719    }
24720    unsafe { _vsqadd_u64(a, b) }
24721}
24722#[doc = "Unsigned saturating Accumulate of Signed value."]
24723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
24724#[inline]
24725#[target_feature(enable = "neon")]
24726#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24727#[cfg_attr(test, assert_instr(usqadd))]
24728pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
24729    unsafe extern "unadjusted" {
24730        #[cfg_attr(
24731            any(target_arch = "aarch64", target_arch = "arm64ec"),
24732            link_name = "llvm.aarch64.neon.usqadd.v2i64"
24733        )]
24734        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
24735    }
24736    unsafe { _vsqaddq_u64(a, b) }
24737}
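// A minimal usage sketch (hypothetical test helper): USQADD adds each signed
// lane of `b` to the corresponding unsigned lane of `a`, saturating to the
// unsigned range instead of wrapping. Assumes the `neon` target feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vsqadd_u8() {
    let a = vdup_n_u8(250);
    let up = vsqadd_u8(a, vdup_n_s8(10)); // 250 + 10 saturates to 255
    let down = vsqadd_u8(a, vdup_n_s8(-5)); // 250 - 5 = 245, no saturation
    assert_eq!(vget_lane_u8::<0>(up), 255);
    assert_eq!(vget_lane_u8::<0>(down), 245);
}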
24738#[doc = "Unsigned saturating accumulate of signed value"]
24739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
24740#[inline]
24741#[target_feature(enable = "neon")]
24742#[cfg_attr(test, assert_instr(usqadd))]
24743#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24744pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
24745    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
24746}
24747#[doc = "Unsigned saturating accumulate of signed value"]
24748#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
24749#[inline]
24750#[target_feature(enable = "neon")]
24751#[cfg_attr(test, assert_instr(usqadd))]
24752#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24753pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
24754    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
24755}
24756#[doc = "Unsigned saturating accumulate of signed value"]
24757#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
24758#[inline]
24759#[target_feature(enable = "neon")]
24760#[cfg_attr(test, assert_instr(usqadd))]
24761#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24762pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
24763    unsafe extern "unadjusted" {
24764        #[cfg_attr(
24765            any(target_arch = "aarch64", target_arch = "arm64ec"),
24766            link_name = "llvm.aarch64.neon.usqadd.i64"
24767        )]
24768        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
24769    }
24770    unsafe { _vsqaddd_u64(a, b) }
24771}
24772#[doc = "Unsigned saturating accumulate of signed value"]
24773#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
24774#[inline]
24775#[target_feature(enable = "neon")]
24776#[cfg_attr(test, assert_instr(usqadd))]
24777#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24778pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
24779    unsafe extern "unadjusted" {
24780        #[cfg_attr(
24781            any(target_arch = "aarch64", target_arch = "arm64ec"),
24782            link_name = "llvm.aarch64.neon.usqadd.i32"
24783        )]
24784        fn _vsqadds_u32(a: u32, b: i32) -> u32;
24785    }
24786    unsafe { _vsqadds_u32(a, b) }
24787}
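// The scalar forms make both saturation directions easy to see. A minimal
// sketch (hypothetical test helper), assuming the `neon` target feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vsqaddb_u8_saturation() {
    assert_eq!(vsqaddb_u8(250, 10), 255); // clamps at the top of the u8 range
    assert_eq!(vsqaddb_u8(10, -20), 0); // clamps at zero rather than wrapping
}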
24788#[doc = "Calculates the square root of each lane."]
24789#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
24790#[inline]
24791#[cfg_attr(test, assert_instr(fsqrt))]
24792#[target_feature(enable = "neon,fp16")]
24793#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24794pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
24795    unsafe { simd_fsqrt(a) }
24796}
24797#[doc = "Calculates the square root of each lane."]
24798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
24799#[inline]
24800#[cfg_attr(test, assert_instr(fsqrt))]
24801#[target_feature(enable = "neon,fp16")]
24802#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24803pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
24804    unsafe { simd_fsqrt(a) }
24805}
24806#[doc = "Calculates the square root of each lane."]
24807#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
24808#[inline]
24809#[target_feature(enable = "neon")]
24810#[cfg_attr(test, assert_instr(fsqrt))]
24811#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24812pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
24813    unsafe { simd_fsqrt(a) }
24814}
24815#[doc = "Calculates the square root of each lane."]
24816#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
24817#[inline]
24818#[target_feature(enable = "neon")]
24819#[cfg_attr(test, assert_instr(fsqrt))]
24820#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24821pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
24822    unsafe { simd_fsqrt(a) }
24823}
24824#[doc = "Calculates the square root of each lane."]
24825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
24826#[inline]
24827#[target_feature(enable = "neon")]
24828#[cfg_attr(test, assert_instr(fsqrt))]
24829#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24830pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
24831    unsafe { simd_fsqrt(a) }
24832}
24833#[doc = "Calculates the square root of each lane."]
24834#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
24835#[inline]
24836#[target_feature(enable = "neon")]
24837#[cfg_attr(test, assert_instr(fsqrt))]
24838#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24839pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
24840    unsafe { simd_fsqrt(a) }
24841}
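// A minimal usage sketch (hypothetical test helper) for the lane-wise square
// root, assuming the `neon` target feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vsqrtq_f32() {
    let r = vsqrtq_f32(vdupq_n_f32(9.0));
    assert_eq!(vgetq_lane_f32::<0>(r), 3.0); // sqrt(9.0) in every lane
}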
24842#[doc = "Floating-point round to integral, using current rounding mode"]
24843#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
24844#[inline]
24845#[target_feature(enable = "neon,fp16")]
24846#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24847#[cfg_attr(test, assert_instr(fsqrt))]
24848pub fn vsqrth_f16(a: f16) -> f16 {
24849    unsafe { sqrtf16(a) }
24850}
24851#[doc = "Shift Right and Insert (immediate)"]
24852#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
24853#[inline]
24854#[target_feature(enable = "neon")]
24855#[cfg_attr(test, assert_instr(sri, N = 1))]
24856#[rustc_legacy_const_generics(2)]
24857#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24858pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
24859    static_assert!(N >= 1 && N <= 8);
24860    unsafe extern "unadjusted" {
24861        #[cfg_attr(
24862            any(target_arch = "aarch64", target_arch = "arm64ec"),
24863            link_name = "llvm.aarch64.neon.vsri.v8i8"
24864        )]
24865        fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
24866    }
24867    unsafe { _vsri_n_s8(a, b, N) }
24868}
24869#[doc = "Shift Right and Insert (immediate)"]
24870#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
24871#[inline]
24872#[target_feature(enable = "neon")]
24873#[cfg_attr(test, assert_instr(sri, N = 1))]
24874#[rustc_legacy_const_generics(2)]
24875#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24876pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
24877    static_assert!(N >= 1 && N <= 8);
24878    unsafe extern "unadjusted" {
24879        #[cfg_attr(
24880            any(target_arch = "aarch64", target_arch = "arm64ec"),
24881            link_name = "llvm.aarch64.neon.vsri.v16i8"
24882        )]
24883        fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
24884    }
24885    unsafe { _vsriq_n_s8(a, b, N) }
24886}
24887#[doc = "Shift Right and Insert (immediate)"]
24888#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
24889#[inline]
24890#[target_feature(enable = "neon")]
24891#[cfg_attr(test, assert_instr(sri, N = 1))]
24892#[rustc_legacy_const_generics(2)]
24893#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24894pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
24895    static_assert!(N >= 1 && N <= 16);
24896    unsafe extern "unadjusted" {
24897        #[cfg_attr(
24898            any(target_arch = "aarch64", target_arch = "arm64ec"),
24899            link_name = "llvm.aarch64.neon.vsri.v4i16"
24900        )]
24901        fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
24902    }
24903    unsafe { _vsri_n_s16(a, b, N) }
24904}
24905#[doc = "Shift Right and Insert (immediate)"]
24906#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
24907#[inline]
24908#[target_feature(enable = "neon")]
24909#[cfg_attr(test, assert_instr(sri, N = 1))]
24910#[rustc_legacy_const_generics(2)]
24911#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24912pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
24913    static_assert!(N >= 1 && N <= 16);
24914    unsafe extern "unadjusted" {
24915        #[cfg_attr(
24916            any(target_arch = "aarch64", target_arch = "arm64ec"),
24917            link_name = "llvm.aarch64.neon.vsri.v8i16"
24918        )]
24919        fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
24920    }
24921    unsafe { _vsriq_n_s16(a, b, N) }
24922}
24923#[doc = "Shift Right and Insert (immediate)"]
24924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
24925#[inline]
24926#[target_feature(enable = "neon")]
24927#[cfg_attr(test, assert_instr(sri, N = 1))]
24928#[rustc_legacy_const_generics(2)]
24929#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24930pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
24931    static_assert!(N >= 1 && N <= 32);
24932    unsafe extern "unadjusted" {
24933        #[cfg_attr(
24934            any(target_arch = "aarch64", target_arch = "arm64ec"),
24935            link_name = "llvm.aarch64.neon.vsri.v2i32"
24936        )]
24937        fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
24938    }
24939    unsafe { _vsri_n_s32(a, b, N) }
24940}
24941#[doc = "Shift Right and Insert (immediate)"]
24942#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
24943#[inline]
24944#[target_feature(enable = "neon")]
24945#[cfg_attr(test, assert_instr(sri, N = 1))]
24946#[rustc_legacy_const_generics(2)]
24947#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24948pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
24949    static_assert!(N >= 1 && N <= 32);
24950    unsafe extern "unadjusted" {
24951        #[cfg_attr(
24952            any(target_arch = "aarch64", target_arch = "arm64ec"),
24953            link_name = "llvm.aarch64.neon.vsri.v4i32"
24954        )]
24955        fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
24956    }
24957    unsafe { _vsriq_n_s32(a, b, N) }
24958}
24959#[doc = "Shift Right and Insert (immediate)"]
24960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
24961#[inline]
24962#[target_feature(enable = "neon")]
24963#[cfg_attr(test, assert_instr(sri, N = 1))]
24964#[rustc_legacy_const_generics(2)]
24965#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24966pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
24967    static_assert!(N >= 1 && N <= 64);
24968    unsafe extern "unadjusted" {
24969        #[cfg_attr(
24970            any(target_arch = "aarch64", target_arch = "arm64ec"),
24971            link_name = "llvm.aarch64.neon.vsri.v1i64"
24972        )]
24973        fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
24974    }
24975    unsafe { _vsri_n_s64(a, b, N) }
24976}
24977#[doc = "Shift Right and Insert (immediate)"]
24978#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
24979#[inline]
24980#[target_feature(enable = "neon")]
24981#[cfg_attr(test, assert_instr(sri, N = 1))]
24982#[rustc_legacy_const_generics(2)]
24983#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24984pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
24985    static_assert!(N >= 1 && N <= 64);
24986    unsafe extern "unadjusted" {
24987        #[cfg_attr(
24988            any(target_arch = "aarch64", target_arch = "arm64ec"),
24989            link_name = "llvm.aarch64.neon.vsri.v2i64"
24990        )]
24991        fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
24992    }
24993    unsafe { _vsriq_n_s64(a, b, N) }
24994}
24995#[doc = "Shift Right and Insert (immediate)"]
24996#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
24997#[inline]
24998#[target_feature(enable = "neon")]
24999#[cfg_attr(test, assert_instr(sri, N = 1))]
25000#[rustc_legacy_const_generics(2)]
25001#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25002pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
25003    static_assert!(N >= 1 && N <= 8);
25004    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
25005}
25006#[doc = "Shift Right and Insert (immediate)"]
25007#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
25008#[inline]
25009#[target_feature(enable = "neon")]
25010#[cfg_attr(test, assert_instr(sri, N = 1))]
25011#[rustc_legacy_const_generics(2)]
25012#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25013pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
25014    static_assert!(N >= 1 && N <= 8);
25015    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25016}
25017#[doc = "Shift Right and Insert (immediate)"]
25018#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
25019#[inline]
25020#[target_feature(enable = "neon")]
25021#[cfg_attr(test, assert_instr(sri, N = 1))]
25022#[rustc_legacy_const_generics(2)]
25023#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25024pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
25025    static_assert!(N >= 1 && N <= 16);
25026    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25027}
25028#[doc = "Shift Right and Insert (immediate)"]
25029#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
25030#[inline]
25031#[target_feature(enable = "neon")]
25032#[cfg_attr(test, assert_instr(sri, N = 1))]
25033#[rustc_legacy_const_generics(2)]
25034#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25035pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
25036    static_assert!(N >= 1 && N <= 16);
25037    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25038}
25039#[doc = "Shift Right and Insert (immediate)"]
25040#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
25041#[inline]
25042#[target_feature(enable = "neon")]
25043#[cfg_attr(test, assert_instr(sri, N = 1))]
25044#[rustc_legacy_const_generics(2)]
25045#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25046pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
25047    static_assert!(N >= 1 && N <= 32);
25048    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
25049}
25050#[doc = "Shift Right and Insert (immediate)"]
25051#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
25052#[inline]
25053#[target_feature(enable = "neon")]
25054#[cfg_attr(test, assert_instr(sri, N = 1))]
25055#[rustc_legacy_const_generics(2)]
25056#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25057pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
25058    static_assert!(N >= 1 && N <= 32);
25059    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
25060}
25061#[doc = "Shift Right and Insert (immediate)"]
25062#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
25063#[inline]
25064#[target_feature(enable = "neon")]
25065#[cfg_attr(test, assert_instr(sri, N = 1))]
25066#[rustc_legacy_const_generics(2)]
25067#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25068pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
25069    static_assert!(N >= 1 && N <= 64);
25070    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25071}
25072#[doc = "Shift Right and Insert (immediate)"]
25073#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
25074#[inline]
25075#[target_feature(enable = "neon")]
25076#[cfg_attr(test, assert_instr(sri, N = 1))]
25077#[rustc_legacy_const_generics(2)]
25078#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25079pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
25080    static_assert!(N >= 1 && N <= 64);
25081    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25082}
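// A minimal usage sketch (hypothetical test helper): SRI mirrors SLI. For
// each lane, `vsri_n_u8::<N>(a, b)` yields `b >> N` with the top N bits taken
// from `a`. Assumes the `neon` target feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vsri_n_u8_semantics() {
    let a = vdup_n_u8(0x80); // top bit set in every lane
    let b = vdup_n_u8(0x02); // shifted right by 1 before insertion
    let r = vsri_n_u8::<1>(a, b);
    // Each lane is 0x81: bit 7 kept from `a`, bit 0 inserted from `b >> 1`.
    assert_eq!(vget_lane_u8::<0>(r), 0x81);
}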
25083#[doc = "Shift Right and Insert (immediate)"]
25084#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
25085#[inline]
25086#[target_feature(enable = "neon")]
25087#[cfg_attr(test, assert_instr(sri, N = 1))]
25088#[rustc_legacy_const_generics(2)]
25089#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25090pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
25091    static_assert!(N >= 1 && N <= 8);
25092    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
25093}
25094#[doc = "Shift Right and Insert (immediate)"]
25095#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
25096#[inline]
25097#[target_feature(enable = "neon")]
25098#[cfg_attr(test, assert_instr(sri, N = 1))]
25099#[rustc_legacy_const_generics(2)]
25100#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25101pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
25102    static_assert!(N >= 1 && N <= 8);
25103    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
25104}
25105#[doc = "Shift Right and Insert (immediate)"]
25106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
25107#[inline]
25108#[target_feature(enable = "neon")]
25109#[cfg_attr(test, assert_instr(sri, N = 1))]
25110#[rustc_legacy_const_generics(2)]
25111#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25112pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
25113    static_assert!(N >= 1 && N <= 16);
25114    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
25115}
25116#[doc = "Shift Right and Insert (immediate)"]
25117#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
25118#[inline]
25119#[target_feature(enable = "neon")]
25120#[cfg_attr(test, assert_instr(sri, N = 1))]
25121#[rustc_legacy_const_generics(2)]
25122#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25123pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
25124    static_assert!(N >= 1 && N <= 16);
25125    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
25126}
25127#[doc = "Shift Right and Insert (immediate)"]
25128#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
25129#[inline]
25130#[target_feature(enable = "neon,aes")]
25131#[cfg_attr(test, assert_instr(sri, N = 1))]
25132#[rustc_legacy_const_generics(2)]
25133#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25134pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
25135    static_assert!(N >= 1 && N <= 64);
25136    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25137}
25138#[doc = "Shift Right and Insert (immediate)"]
25139#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
25140#[inline]
25141#[target_feature(enable = "neon,aes")]
25142#[cfg_attr(test, assert_instr(sri, N = 1))]
25143#[rustc_legacy_const_generics(2)]
25144#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25145pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
25146    static_assert!(N >= 1 && N <= 64);
25147    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
25148}
25149#[doc = "Shift right and insert"]
25150#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
25151#[inline]
25152#[target_feature(enable = "neon")]
25153#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25154#[rustc_legacy_const_generics(2)]
25155#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
25156pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
25157    static_assert!(N >= 1 && N <= 64);
25158    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
25159}
25160#[doc = "Shift right and insert"]
25161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
25162#[inline]
25163#[target_feature(enable = "neon")]
25164#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25165#[rustc_legacy_const_generics(2)]
25166#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
25167pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
25168    static_assert!(N >= 1 && N <= 64);
25169    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
25170}
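// A minimal sketch of the scalar form (hypothetical test helper):
// `vsrid_n_u64::<N>(a, b)` keeps the top N bits of `a` and fills the rest
// from `b >> N`. Assumes the `neon` target feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vsrid_n_u64() {
    let a = 0xAB00_0000_0000_0000u64; // only the top 8 bits matter for N = 8
    let b = 0x1122_3344_5566_7788u64;
    assert_eq!(vsrid_n_u64::<8>(a, b), 0xAB11_2233_4455_6677);
}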
25171#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25172#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
25173#[doc = "## Safety"]
25174#[doc = "  * Neon intrinsic unsafe"]
25175#[inline]
25176#[target_feature(enable = "neon,fp16")]
25177#[cfg_attr(test, assert_instr(str))]
25178#[allow(clippy::cast_ptr_alignment)]
25179#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25180pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
25181    crate::ptr::write_unaligned(ptr.cast(), a)
25182}
25183#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25184#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
25185#[doc = "## Safety"]
25186#[doc = "  * Neon intrinsic unsafe"]
25187#[inline]
25188#[target_feature(enable = "neon,fp16")]
25189#[cfg_attr(test, assert_instr(str))]
25190#[allow(clippy::cast_ptr_alignment)]
25191#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
25192pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
25193    crate::ptr::write_unaligned(ptr.cast(), a)
25194}
25195#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25196#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
25197#[doc = "## Safety"]
25198#[doc = "  * Neon intrinsic unsafe"]
25199#[inline]
25200#[target_feature(enable = "neon")]
25201#[cfg_attr(test, assert_instr(str))]
25202#[allow(clippy::cast_ptr_alignment)]
25203#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25204pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
25205    crate::ptr::write_unaligned(ptr.cast(), a)
25206}
25207#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25208#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
25209#[doc = "## Safety"]
25210#[doc = "  * Neon intrinsic unsafe"]
25211#[inline]
25212#[target_feature(enable = "neon")]
25213#[cfg_attr(test, assert_instr(str))]
25214#[allow(clippy::cast_ptr_alignment)]
25215#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25216pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
25217    crate::ptr::write_unaligned(ptr.cast(), a)
25218}
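// A minimal usage sketch (hypothetical test helper): `vst1q_f32` writes all
// four lanes to the pointer with no alignment requirement beyond `f32`'s; the
// caller must guarantee the destination is valid for 16 bytes of writes.
// Assumes the `neon` target feature.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
fn example_vst1q_f32() {
    let mut out = [0.0f32; 4];
    unsafe { vst1q_f32(out.as_mut_ptr(), vdupq_n_f32(1.5)) };
    assert_eq!(out, [1.5; 4]);
}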
25219#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25220#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
25221#[doc = "## Safety"]
25222#[doc = "  * Neon intrinsic unsafe"]
25223#[inline]
25224#[target_feature(enable = "neon")]
25225#[cfg_attr(test, assert_instr(str))]
25226#[allow(clippy::cast_ptr_alignment)]
25227#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25228pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
25229    crate::ptr::write_unaligned(ptr.cast(), a)
25230}
25231#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25232#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
25233#[doc = "## Safety"]
25234#[doc = "  * Neon intrinsic unsafe"]
25235#[inline]
25236#[target_feature(enable = "neon")]
25237#[cfg_attr(test, assert_instr(str))]
25238#[allow(clippy::cast_ptr_alignment)]
25239#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25240pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
25241    crate::ptr::write_unaligned(ptr.cast(), a)
25242}
25243#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25244#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
25245#[doc = "## Safety"]
25246#[doc = "  * Neon intrinsic unsafe"]
25247#[inline]
25248#[target_feature(enable = "neon")]
25249#[cfg_attr(test, assert_instr(str))]
25250#[allow(clippy::cast_ptr_alignment)]
25251#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25252pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
25253    crate::ptr::write_unaligned(ptr.cast(), a)
25254}
25255#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25256#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
25257#[doc = "## Safety"]
25258#[doc = "  * Neon intrinsic unsafe"]
25259#[inline]
25260#[target_feature(enable = "neon")]
25261#[cfg_attr(test, assert_instr(str))]
25262#[allow(clippy::cast_ptr_alignment)]
25263#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25264pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
25265    crate::ptr::write_unaligned(ptr.cast(), a)
25266}
25267#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25268#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
25269#[doc = "## Safety"]
25270#[doc = "  * Neon intrinsic unsafe"]
25271#[inline]
25272#[target_feature(enable = "neon")]
25273#[cfg_attr(test, assert_instr(str))]
25274#[allow(clippy::cast_ptr_alignment)]
25275#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25276pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
25277    crate::ptr::write_unaligned(ptr.cast(), a)
25278}
25279#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
25281#[doc = "## Safety"]
25282#[doc = "  * Neon intrinsic unsafe"]
25283#[inline]
25284#[target_feature(enable = "neon")]
25285#[cfg_attr(test, assert_instr(str))]
25286#[allow(clippy::cast_ptr_alignment)]
25287#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25288pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
25289    crate::ptr::write_unaligned(ptr.cast(), a)
25290}
25291#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25292#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
25293#[doc = "## Safety"]
25294#[doc = "  * Neon intrinsic unsafe"]
25295#[inline]
25296#[target_feature(enable = "neon")]
25297#[cfg_attr(test, assert_instr(str))]
25298#[allow(clippy::cast_ptr_alignment)]
25299#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25300pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
25301    crate::ptr::write_unaligned(ptr.cast(), a)
25302}
25303#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25304#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
25305#[doc = "## Safety"]
25306#[doc = "  * Neon intrinsic unsafe"]
25307#[inline]
25308#[target_feature(enable = "neon")]
25309#[cfg_attr(test, assert_instr(str))]
25310#[allow(clippy::cast_ptr_alignment)]
25311#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25312pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
25313    crate::ptr::write_unaligned(ptr.cast(), a)
25314}
25315#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25316#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
25317#[doc = "## Safety"]
25318#[doc = "  * Neon intrinsic unsafe"]
25319#[inline]
25320#[target_feature(enable = "neon")]
25321#[cfg_attr(test, assert_instr(str))]
25322#[allow(clippy::cast_ptr_alignment)]
25323#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25324pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
25325    crate::ptr::write_unaligned(ptr.cast(), a)
25326}
25327#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25328#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
25329#[doc = "## Safety"]
25330#[doc = "  * Neon intrinsic unsafe"]
25331#[inline]
25332#[target_feature(enable = "neon")]
25333#[cfg_attr(test, assert_instr(str))]
25334#[allow(clippy::cast_ptr_alignment)]
25335#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25336pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
25337    crate::ptr::write_unaligned(ptr.cast(), a)
25338}
25339#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25340#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
25341#[doc = "## Safety"]
25342#[doc = "  * Neon intrinsic unsafe"]
25343#[inline]
25344#[target_feature(enable = "neon")]
25345#[cfg_attr(test, assert_instr(str))]
25346#[allow(clippy::cast_ptr_alignment)]
25347#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25348pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
25349    crate::ptr::write_unaligned(ptr.cast(), a)
25350}
25351#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25352#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
25353#[doc = "## Safety"]
25354#[doc = "  * Neon intrinsic unsafe"]
25355#[inline]
25356#[target_feature(enable = "neon")]
25357#[cfg_attr(test, assert_instr(str))]
25358#[allow(clippy::cast_ptr_alignment)]
25359#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25360pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
25361    crate::ptr::write_unaligned(ptr.cast(), a)
25362}
25363#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25364#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
25365#[doc = "## Safety"]
25366#[doc = "  * Neon intrinsic unsafe"]
25367#[inline]
25368#[target_feature(enable = "neon")]
25369#[cfg_attr(test, assert_instr(str))]
25370#[allow(clippy::cast_ptr_alignment)]
25371#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25372pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
25373    crate::ptr::write_unaligned(ptr.cast(), a)
25374}
25375#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25376#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
25377#[doc = "## Safety"]
25378#[doc = "  * Neon intrinsic unsafe"]
25379#[inline]
25380#[target_feature(enable = "neon")]
25381#[cfg_attr(test, assert_instr(str))]
25382#[allow(clippy::cast_ptr_alignment)]
25383#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25384pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
25385    crate::ptr::write_unaligned(ptr.cast(), a)
25386}
25387#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25388#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
25389#[doc = "## Safety"]
25390#[doc = "  * Neon intrinsic unsafe"]
25391#[inline]
25392#[target_feature(enable = "neon")]
25393#[cfg_attr(test, assert_instr(str))]
25394#[allow(clippy::cast_ptr_alignment)]
25395#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25396pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
25397    crate::ptr::write_unaligned(ptr.cast(), a)
25398}
25399#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25400#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
25401#[doc = "## Safety"]
25402#[doc = "  * Neon intrinsic unsafe"]
25403#[inline]
25404#[target_feature(enable = "neon")]
25405#[cfg_attr(test, assert_instr(str))]
25406#[allow(clippy::cast_ptr_alignment)]
25407#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25408pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
25409    crate::ptr::write_unaligned(ptr.cast(), a)
25410}
25411#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25412#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
25413#[doc = "## Safety"]
25414#[doc = "  * Neon intrinsic unsafe"]
25415#[inline]
25416#[target_feature(enable = "neon")]
25417#[cfg_attr(test, assert_instr(str))]
25418#[allow(clippy::cast_ptr_alignment)]
25419#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25420pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
25421    crate::ptr::write_unaligned(ptr.cast(), a)
25422}
25423#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25424#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
25425#[doc = "## Safety"]
25426#[doc = "  * Neon instrinsic unsafe"]
25427#[inline]
25428#[target_feature(enable = "neon")]
25429#[cfg_attr(test, assert_instr(str))]
25430#[allow(clippy::cast_ptr_alignment)]
25431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25432pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
25433    crate::ptr::write_unaligned(ptr.cast(), a)
25434}
25435#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
25437#[doc = "## Safety"]
25438#[doc = "  * Neon instrinsic unsafe"]
25439#[inline]
25440#[target_feature(enable = "neon")]
25441#[cfg_attr(test, assert_instr(str))]
25442#[allow(clippy::cast_ptr_alignment)]
25443#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25444pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
25445    crate::ptr::write_unaligned(ptr.cast(), a)
25446}
25447#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25448#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
25449#[doc = "## Safety"]
25450#[doc = "  * Neon instrinsic unsafe"]
25451#[inline]
25452#[target_feature(enable = "neon")]
25453#[cfg_attr(test, assert_instr(str))]
25454#[allow(clippy::cast_ptr_alignment)]
25455#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25456pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
25457    crate::ptr::write_unaligned(ptr.cast(), a)
25458}
25459#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25460#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
25461#[doc = "## Safety"]
25462#[doc = "  * Neon instrinsic unsafe"]
25463#[inline]
25464#[target_feature(enable = "neon")]
25465#[cfg_attr(test, assert_instr(str))]
25466#[allow(clippy::cast_ptr_alignment)]
25467#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25468pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
25469    crate::ptr::write_unaligned(ptr.cast(), a)
25470}
25471#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25472#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
25473#[doc = "## Safety"]
25474#[doc = "  * Neon instrinsic unsafe"]
25475#[inline]
25476#[target_feature(enable = "neon")]
25477#[cfg_attr(test, assert_instr(str))]
25478#[allow(clippy::cast_ptr_alignment)]
25479#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25480pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
25481    crate::ptr::write_unaligned(ptr.cast(), a)
25482}
25483#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25484#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
25485#[doc = "## Safety"]
25486#[doc = "  * Neon instrinsic unsafe"]
25487#[inline]
25488#[target_feature(enable = "neon,aes")]
25489#[cfg_attr(test, assert_instr(str))]
25490#[allow(clippy::cast_ptr_alignment)]
25491#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25492pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
25493    crate::ptr::write_unaligned(ptr.cast(), a)
25494}
25495#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
25496#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
25497#[doc = "## Safety"]
25498#[doc = "  * Neon instrinsic unsafe"]
25499#[inline]
25500#[target_feature(enable = "neon,aes")]
25501#[cfg_attr(test, assert_instr(str))]
25502#[allow(clippy::cast_ptr_alignment)]
25503#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25504pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
25505    crate::ptr::write_unaligned(ptr.cast(), a)
25506}
25507#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
25508#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
25509#[doc = "## Safety"]
25510#[doc = "  * Neon instrinsic unsafe"]
25511#[inline]
25512#[target_feature(enable = "neon")]
25513#[cfg_attr(test, assert_instr(st1))]
25514#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25515pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
25516    unsafe extern "unadjusted" {
25517        #[cfg_attr(
25518            any(target_arch = "aarch64", target_arch = "arm64ec"),
25519            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
25520        )]
25521        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
25522    }
25523    _vst1_f64_x2(b.0, b.1, a)
25524}
25525#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
25526#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
25527#[doc = "## Safety"]
25528#[doc = "  * Neon instrinsic unsafe"]
25529#[inline]
25530#[target_feature(enable = "neon")]
25531#[cfg_attr(test, assert_instr(st1))]
25532#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25533pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
25534    unsafe extern "unadjusted" {
25535        #[cfg_attr(
25536            any(target_arch = "aarch64", target_arch = "arm64ec"),
25537            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
25538        )]
25539        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
25540    }
25541    _vst1q_f64_x2(b.0, b.1, a)
25542}
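// Editorial illustration (hypothetical test, not generated): the `_x2` stores
// write their registers back to back, *without* interleaving, so four
// consecutive f64 slots receive all of b.0 and then all of b.1.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn example_vst1q_f64_x2_is_contiguous() {
    let mut out = [0.0f64; 4];
    unsafe {
        let pair = float64x2x2_t(vdupq_n_f64(1.0), vdupq_n_f64(2.0));
        vst1q_f64_x2(out.as_mut_ptr(), pair);
    }
    assert_eq!(out, [1.0, 1.0, 2.0, 2.0]);
}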
#[doc = "Store multiple single-element structures to one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    static_assert!(LANE == 0);
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    *a = simd_extract!(b, LANE as u32);
}
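// Editorial illustration (hypothetical test, not generated): the `_lane`
// stores write a single element selected by the const LANE parameter, here
// lane 1 of a float64x2_t.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn example_vst1q_lane_f64_writes_one_element() {
    let mut out = 0.0f64;
    unsafe {
        let v = vsetq_lane_f64::<1>(42.0, vdupq_n_f64(0.0));
        vst1q_lane_f64::<1>(&mut out, v);
    }
    assert_eq!(out, 42.0);
}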
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st1))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v1f64.p0"
        )]
        fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    _vst2_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2f64.p0"
        )]
        fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    _vst2q_f64(b.0, b.1, a as _)
}
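// Editorial illustration (hypothetical test, not generated): unlike the `_x2`
// store above, `vst2q_f64` interleaves its two registers, so memory receives
// b.0[0], b.1[0], b.0[1], b.1[1].
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn example_vst2q_f64_interleaves() {
    let mut out = [0.0f64; 4];
    unsafe {
        let b = float64x2x2_t(vdupq_n_f64(1.0), vdupq_n_f64(2.0));
        vst2q_f64(out.as_mut_ptr(), b);
    }
    assert_eq!(out, [1.0, 2.0, 1.0, 2.0]);
}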
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2i64.p0"
        )]
        fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    _vst2q_s64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
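// Editorial illustration (hypothetical test, not generated): the lane form of
// an st2 writes exactly one interleaved pair — element LANE of each register —
// to two consecutive slots.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn example_vst2q_lane_s8_writes_one_pair() {
    let mut out = [0i8; 2];
    unsafe {
        let b = int8x16x2_t(vdupq_n_s8(7), vdupq_n_s8(-3));
        vst2q_lane_s8::<5>(out.as_mut_ptr(), b);
    }
    assert_eq!(out, [7, -3]);
}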
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v1f64.p0"
        )]
        fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
    }
    _vst3_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    static_assert!(LANE == 0);
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    static_assert!(LANE == 0);
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2f64.p0"
        )]
        fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
    }
    _vst3q_f64(b.0, b.1, b.2, a as _)
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3.v2i64.p0"
        )]
        fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
    }
    _vst3q_s64(b.0, b.1, b.2, a as _)
}
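// Editorial illustration (hypothetical test, not generated): an st3 store
// interleaves three registers, yielding the element order
// b.0[0], b.1[0], b.2[0], b.0[1], b.1[1], b.2[1].
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn example_vst3q_s64_interleaves() {
    let mut out = [0i64; 6];
    unsafe {
        let b = int64x2x3_t(vdupq_n_s64(1), vdupq_n_s64(2), vdupq_n_s64(3));
        vst3q_s64(out.as_mut_ptr(), b);
    }
    assert_eq!(out, [1, 2, 3, 1, 2, 3]);
}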
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v1f64.p0"
        )]
        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    static_assert!(LANE == 0);
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    static_assert!(LANE == 0);
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2f64.p0"
        )]
        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
    }
    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2i64.p0"
        )]
        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
    }
    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
}
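// Editorial illustration (hypothetical test, not generated): st4 extends the
// same interleaving pattern to four registers, one element from each in turn.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn example_vst4q_s64_interleaves() {
    let mut out = [0i64; 8];
    unsafe {
        let b = int64x2x4_t(
            vdupq_n_s64(1),
            vdupq_n_s64(2),
            vdupq_n_s64(3),
            vdupq_n_s64(4),
        );
        vst4q_s64(out.as_mut_ptr(), b);
    }
    assert_eq!(out, [1, 2, 3, 4, 1, 2, 3, 4]);
}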
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    a.wrapping_sub(b)
}
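// Editorial illustration (hypothetical test, not generated): the scalar `d`
// forms are plain wrapping arithmetic, so overflow and unsigned underflow wrap
// instead of panicking.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn example_vsubd_wraps() {
    assert_eq!(vsubd_u64(0, 1), u64::MAX);
    assert_eq!(vsubd_s64(i64::MIN, 1), i64::MAX);
}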
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
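// Editorial illustration (hypothetical test, not generated): the `l_high`
// forms subtract the *upper* halves of their inputs and widen the result, so
// even -128 - 127 is representable in the i16 lanes.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn example_vsubl_high_s8_widens() {
    let mut out = [0i16; 8];
    unsafe {
        let wide = vsubl_high_s8(vdupq_n_s8(-128), vdupq_n_s8(127));
        vst1q_s16(out.as_mut_ptr(), wide);
    }
    assert_eq!(out, [-255i16; 8]);
}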
26510#[doc = "Signed Subtract Long"]
26511#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
26512#[inline]
26513#[target_feature(enable = "neon")]
26514#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26515#[cfg_attr(test, assert_instr(ssubl2))]
26516pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
26517    unsafe {
26518        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
26519        let d: int32x4_t = simd_cast(c);
26520        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
26521        let f: int32x4_t = simd_cast(e);
26522        simd_sub(d, f)
26523    }
26524}
26525#[doc = "Signed Subtract Long"]
26526#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
26527#[inline]
26528#[target_feature(enable = "neon")]
26529#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26530#[cfg_attr(test, assert_instr(ssubl2))]
26531pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
26532    unsafe {
26533        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
26534        let d: int64x2_t = simd_cast(c);
26535        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
26536        let f: int64x2_t = simd_cast(e);
26537        simd_sub(d, f)
26538    }
26539}
26540#[doc = "Unsigned Subtract Long"]
26541#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
26542#[inline]
26543#[target_feature(enable = "neon")]
26544#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26545#[cfg_attr(test, assert_instr(usubl2))]
26546pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
26547    unsafe {
26548        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
26549        let d: uint16x8_t = simd_cast(c);
26550        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
26551        let f: uint16x8_t = simd_cast(e);
26552        simd_sub(d, f)
26553    }
26554}
26555#[doc = "Unsigned Subtract Long"]
26556#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
26557#[inline]
26558#[target_feature(enable = "neon")]
26559#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26560#[cfg_attr(test, assert_instr(usubl2))]
26561pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
26562    unsafe {
26563        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
26564        let d: uint32x4_t = simd_cast(c);
26565        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
26566        let f: uint32x4_t = simd_cast(e);
26567        simd_sub(d, f)
26568    }
26569}
26570#[doc = "Unsigned Subtract Long"]
26571#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
26572#[inline]
26573#[target_feature(enable = "neon")]
26574#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26575#[cfg_attr(test, assert_instr(usubl2))]
26576pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
26577    unsafe {
26578        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
26579        let d: uint64x2_t = simd_cast(c);
26580        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
26581        let f: uint64x2_t = simd_cast(e);
26582        simd_sub(d, f)
26583    }
26584}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
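// Editor's note: an illustrative sketch, not produced by stdarch-gen-arm, of
// the `vsubw_high_*` forms: only the second operand is narrow; its high half
// is widened to match `a` and subtracted element-wise. The `_sketch_*` name is
// ours.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn _sketch_vsubw_high_s16() {
    let a: int32x4_t = vdupq_n_s32(1000);
    let b: int16x8_t = vdupq_n_s16(-24);
    // The high four i16 lanes of `b` are sign-extended to i32, so each
    // result lane is 1000 - (-24) = 1024.
    let r: int32x4_t = vsubw_high_s16(a, b);
    let lanes: [i32; 4] = unsafe { transmute(r) };
    assert_eq!(lanes, [1024i32; 4]);
}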
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, transmute(c), b)
    }
}
#[doc = "Dot product index form with signed and unsigned integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(sudot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: uint32x4_t = transmute(c);
        let c: uint32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, transmute(c), b)
    }
}
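// Editor's note: an illustrative sketch, not produced by stdarch-gen-arm, of
// the lane-indexed mixed-sign dot product above. `LANE` selects one 32-bit
// group of four u8 values from `c`, which is broadcast and dotted against the
// signed bytes of `b`. The `_sketch_*` wrapper is ours and is gated on the
// same unstable `i8mm` feature as the intrinsics themselves.
#[cfg(test)]
#[target_feature(enable = "neon,i8mm")]
fn _sketch_vsudot_laneq_s32(acc: int32x2_t, signed: int8x8_t, table: uint8x16_t) -> int32x2_t {
    // Lane 2 picks bytes 8..12 of `table`; each i32 accumulator lane gains
    // the dot product of its own four i8 values with those four u8 values.
    vsudot_laneq_s32::<2>(acc, signed, table)
}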
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        transmute(b)
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
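// Editor's note: an illustrative sketch, not produced by stdarch-gen-arm, of
// the 64-bit `vtbl1` lookups above. They are emulated with the 128-bit TBL by
// zero-extending the table; TBL returns 0 for any index outside the table,
// which matches the legacy VTBL1 out-of-range behaviour. The `_sketch_*` name
// is ours.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn _sketch_vtbl1_u8() {
    let table: uint8x8_t = vdup_n_u8(0xAB);
    // Index 3 is in range (0..8) and reads 0xAB; index 9 is out of range and
    // yields 0. Splatting each index keeps the check lane-order independent.
    let hit: [u8; 8] = unsafe { transmute(vtbl1_u8(table, vdup_n_u8(3))) };
    let miss: [u8; 8] = unsafe { transmute(vtbl1_u8(table, vdup_n_u8(9))) };
    assert_eq!(hit, [0xAB; 8]);
    assert_eq!(miss, [0; 8]);
}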
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
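// Editor's note: the `target_endian = "big"` twins below (and throughout the
// rest of this file) all follow one pattern: reverse the lanes of every input,
// run the same little-endian body, then reverse the result. TBL numbers table
// bytes by lane index, so this keeps the observable per-lane behaviour
// identical on big-endian targets.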
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    let mut a: uint8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    let mut a: poly8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
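// Editor's note: an illustrative sketch, not produced by stdarch-gen-arm, of
// the `vtbl4` family above: the four 64-bit table halves are packed into two
// 128-bit registers and looked up with a single two-register TBL, so all 32
// table bytes are addressable. The `_sketch_*` name is ours.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn _sketch_vtbl4_u8() {
    let table = uint8x8x4_t(
        vdup_n_u8(10),
        vdup_n_u8(11),
        vdup_n_u8(12),
        vdup_n_u8(13),
    );
    // Index 25 lands in the fourth 8-byte half, so every lane reads 13.
    let r: [u8; 8] = unsafe { transmute(vtbl4_u8(table, vdup_n_u8(25))) };
    assert_eq!(r, [13; 8]);
}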
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
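// Editor's note: an illustrative sketch, not produced by stdarch-gen-arm, of
// the `vtbx1` forms above. TBX only preserves the destination for indices
// outside its full 16-byte table, so the 8-entry legacy semantics are
// recovered with the `simd_lt(c, 8)`/`simd_select` fallback: indices 8..=15
// must also return `a` rather than the zero padding. The `_sketch_*` name is
// ours.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn _sketch_vtbx1_u8() {
    let fallback: uint8x8_t = vdup_n_u8(0xFF);
    let table: uint8x8_t = vdup_n_u8(7);
    // Index 2 is in range and reads the table; index 12 is out of range for
    // an 8-byte table and keeps the corresponding lane of `fallback`.
    let hit: [u8; 8] = unsafe { transmute(vtbx1_u8(fallback, table, vdup_n_u8(2))) };
    let miss: [u8; 8] = unsafe { transmute(vtbx1_u8(fallback, table, vdup_n_u8(12))) };
    assert_eq!(hit, [7; 8]);
    assert_eq!(miss, [0xFF; 8]);
}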
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
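// Editor's note: as with `vtbx1`, the 24-entry `vtbx3` tables are padded to
// two full 16-byte registers, so TBX alone would return the zero padding (not
// the fallback) for indices 24..=31. The `simd_lt(c, 24)`/`simd_select`
// wrapper above restores the legacy rule that any out-of-range index preserves
// the corresponding lane of `a`.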
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x8x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x8x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
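// Editor's note: an illustrative sketch, not produced by stdarch-gen-arm, of
// `vtbx4`: all 32 table bytes exist, so the raw two-register TBX already
// implements the legacy semantics and no `simd_select` fallback is needed.
// The `_sketch_*` name is ours.
#[cfg(test)]
#[target_feature(enable = "neon")]
fn _sketch_vtbx4_u8() {
    let fallback: uint8x8_t = vdup_n_u8(0xEE);
    let table = uint8x8x4_t(
        vdup_n_u8(1),
        vdup_n_u8(2),
        vdup_n_u8(3),
        vdup_n_u8(4),
    );
    // Index 31 reads the last table half; index 40 is out of range and keeps
    // the fallback lane.
    let hit: [u8; 8] = unsafe { transmute(vtbx4_u8(fallback, table, vdup_n_u8(31))) };
    let miss: [u8; 8] = unsafe { transmute(vtbx4_u8(fallback, table, vdup_n_u8(40))) };
    assert_eq!(hit, [4; 8]);
    assert_eq!(miss, [0xEE; 8]);
}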
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
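// Editor's note (not generated): `vtrn1*` interleaves the even-numbered lanes
// of its two inputs. For the two-lane types above there is only one even lane,
// so the shuffle [0, 2] is the same permutation as ZIP1 — which is why those
// variants assert `zip1` rather than `trn1`. The sketch below is ours and
// assumes little-endian lane-to-memory order for the `transmute` checks.
#[cfg(all(test, target_endian = "little"))]
#[target_feature(enable = "neon")]
fn _sketch_vtrn1_s32() {
    let a: int32x2_t = unsafe { transmute([10i32, 11]) };
    let b: int32x2_t = unsafe { transmute([20i32, 21]) };
    // Result is [a0, b0].
    let r: [i32; 2] = unsafe { transmute(vtrn1_s32(a, b)) };
    assert_eq!(r, [10, 20]);
}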
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
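// Editor's note: an illustrative sketch, not produced by stdarch-gen-arm:
// `vtrn2*` is the odd-lane counterpart of `vtrn1*`, and applying both to the
// same inputs yields the full 2x2 transpose of adjacent lane pairs. Assumes
// little-endian lane-to-memory order for the `transmute` checks.
#[cfg(all(test, target_endian = "little"))]
#[target_feature(enable = "neon")]
fn _sketch_vtrn1_vtrn2_s16() {
    let a: int16x4_t = unsafe { transmute([0i16, 1, 2, 3]) };
    let b: int16x4_t = unsafe { transmute([4i16, 5, 6, 7]) };
    let even: [i16; 4] = unsafe { transmute(vtrn1_s16(a, b)) };
    let odd: [i16; 4] = unsafe { transmute(vtrn2_s16(a, b)) };
    // Even lanes interleaved: [a0, b0, a2, b2]; odd lanes: [a1, b1, a3, b3].
    assert_eq!(even, [0, 4, 2, 6]);
    assert_eq!(odd, [1, 5, 3, 7]);
}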
27749#[doc = "Signed compare bitwise Test bits nonzero"]
27750#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
27751#[inline]
27752#[target_feature(enable = "neon")]
27753#[cfg_attr(test, assert_instr(cmtst))]
27754#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27755pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
27756    unsafe {
27757        let c: int64x1_t = simd_and(a, b);
27758        let d: i64x1 = i64x1::new(0);
27759        simd_ne(c, transmute(d))
27760    }
27761}
27762#[doc = "Signed compare bitwise Test bits nonzero"]
27763#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
27764#[inline]
27765#[target_feature(enable = "neon")]
27766#[cfg_attr(test, assert_instr(cmtst))]
27767#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27768pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
27769    unsafe {
27770        let c: int64x2_t = simd_and(a, b);
27771        let d: i64x2 = i64x2::new(0, 0);
27772        simd_ne(c, transmute(d))
27773    }
27774}
27775#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe {
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Polynomial compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe {
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe {
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe {
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
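// Editor's sketch (not part of the generated output): CMTST semantics as
// implemented above — each output lane becomes all ones when `a & b` has any
// bit set, else zero. Illustrative test-only code assuming NEON is available.
#[cfg(test)]
#[allow(dead_code)]
fn cmtst_sketch() {
    unsafe {
        let a = vdupq_n_s64(0b1010);
        // No shared bits: every lane tests zero.
        assert_eq!(
            transmute::<_, [u64; 2]>(vtstq_s64(a, vdupq_n_s64(0b0101))),
            [0, 0]
        );
        // Bit 1 overlaps: every lane is set to all ones.
        assert_eq!(
            transmute::<_, [u64; 2]>(vtstq_s64(a, vdupq_n_s64(0b0010))),
            [u64::MAX, u64::MAX]
        );
    }
}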
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
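// Editor's sketch (not part of the generated output): the scalar forms above
// simply reuse the one-lane vector versions through `transmute`, so they
// inherit the same all-ones/all-zeros result encoding. A hedged illustration:
#[cfg(test)]
#[allow(dead_code)]
fn tst_scalar_sketch() {
    unsafe {
        assert_eq!(vtstd_u64(0x00ff, 0xff00), 0); // disjoint bit sets
        assert_eq!(vtstd_u64(0x00ff, 0x0180), u64::MAX); // bit 7 overlaps
    }
}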
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    unsafe { _vuqaddq_s64(a, b) }
}
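// Editor's sketch (not part of the generated output): SUQADD adds an
// *unsigned* vector into a *signed* accumulator with signed saturation, so
// 120 + 200 below pins at `i8::MAX` instead of wrapping. Names and values are
// illustrative, and NEON support is assumed.
#[cfg(test)]
#[allow(dead_code)]
fn suqadd_sketch() {
    unsafe {
        let acc = vdup_n_s8(120);
        let add = vdup_n_u8(200);
        let r: [i8; 8] = transmute(vuqadd_s8(acc, add));
        assert_eq!(r, [i8::MAX; 8]);
    }
}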
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    unsafe { _vuqadds_s32(a, b) }
}
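// Editor's sketch (not part of the generated output): the scalar variants
// share the saturating semantics of the vector forms. Note the asymmetry
// above: `b`/`h` broadcast into a vector and extract lane 0, while `s`/`d`
// call the scalar LLVM intrinsics directly. A hedged illustration:
#[cfg(test)]
#[allow(dead_code)]
fn suqadd_scalar_sketch() {
    unsafe {
        assert_eq!(vuqaddb_s8(120, 200), i8::MAX); // saturates
        assert_eq!(vuqaddd_s64(-1, 1), 0); // in range, no saturation
    }
}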
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vusdot_s32(a, b, transmute(c))
    }
}
#[doc = "Dot product index form with unsigned and signed integers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"]
#[inline]
#[target_feature(enable = "neon,i8mm")]
#[cfg_attr(test, assert_instr(usdot, LANE = 3))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")]
pub fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: int32x4_t = transmute(c);
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vusdotq_s32(a, b, transmute(c))
    }
}
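// Editor's sketch (not part of the generated output): USDOT multiplies
// unsigned bytes from `b` against signed bytes from the 32-bit group of `c`
// selected by `LANE`, accumulating four products into each `i32` lane. A
// minimal illustration with LANE = 0, assuming both the `neon` and `i8mm`
// features are available at run time.
#[cfg(test)]
#[allow(dead_code)]
fn usdot_lane_sketch() {
    unsafe {
        let acc = vdup_n_s32(0);
        let b = vdup_n_u8(2); // unsigned operand
        let c = vdupq_n_s8(-3); // signed operand
        // Each lane accumulates 4 * (2 * -3) = -24.
        let r: [i32; 2] = transmute(vusdot_laneq_s32::<0>(acc, b, c));
        assert_eq!(r, [-24, -24]);
    }
}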
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
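// Editor's sketch (not part of the generated output): UZP1 keeps the
// even-indexed lanes of the concatenation of the two inputs. For two-lane
// vectors that selection coincides with ZIP1, which is why the 64-bit and
// 2 x 32-bit variants above assert `zip1`. A hedged little-endian example:
#[cfg(test)]
#[allow(dead_code)]
fn uzp1_sketch() {
    unsafe {
        let a: uint8x8_t = transmute([0u8, 1, 2, 3, 4, 5, 6, 7]);
        let b: uint8x8_t = transmute([8u8, 9, 10, 11, 12, 13, 14, 15]);
        // Even lanes of `a`, then even lanes of `b`.
        let r: [u8; 8] = transmute(vuzp1_u8(a, b));
        assert_eq!(r, [0, 2, 4, 6, 8, 10, 12, 14]);
    }
}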
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
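// Editor's sketch (not part of the generated output): UZP2 is the complement
// of UZP1, keeping the odd-indexed lanes of the concatenated inputs (and
// matching ZIP2 in the two-lane cases above). Same assumptions as the UZP1
// example:
#[cfg(test)]
#[allow(dead_code)]
fn uzp2_sketch() {
    unsafe {
        let a: uint8x8_t = transmute([0u8, 1, 2, 3, 4, 5, 6, 7]);
        let b: uint8x8_t = transmute([8u8, 9, 10, 11, 12, 13, 14, 15]);
        let r: [u8; 8] = transmute(vuzp2_u8(a, b));
        assert_eq!(r, [1, 3, 5, 7, 9, 11, 13, 15]);
    }
}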
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
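// Editor's sketch (not part of the generated output): XAR EORs the two
// inputs and then rotates each 64-bit lane right by the immediate. An
// illustration assuming the `sha3` feature is available at run time;
// rotating the low bit right by one lands it in bit 63.
#[cfg(test)]
#[allow(dead_code)]
fn xar_sketch() {
    unsafe {
        let a = vdupq_n_u64(1);
        let b = vdupq_n_u64(0);
        let r: [u64; 2] = transmute(vxarq_u64::<1>(a, b));
        assert_eq!(r, [1u64 << 63; 2]);
    }
}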
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
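// Editor's sketch (not part of the generated output): ZIP1 interleaves the
// low halves of the two inputs, lane by lane, mirroring the shuffle indices
// above. A hedged little-endian illustration:
#[cfg(test)]
#[allow(dead_code)]
fn zip1_sketch() {
    unsafe {
        let a: uint8x8_t = transmute([0u8, 1, 2, 3, 4, 5, 6, 7]);
        let b: uint8x8_t = transmute([10u8, 11, 12, 13, 14, 15, 16, 17]);
        let r: [u8; 8] = transmute(vzip1_u8(a, b));
        assert_eq!(r, [0, 10, 1, 11, 2, 12, 3, 13]);
    }
}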
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
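// Editor's sketch (not part of the generated output): ZIP2 is the high-half
// counterpart of ZIP1, interleaving the upper half of each input. Same
// assumptions as the ZIP1 example:
#[cfg(test)]
#[allow(dead_code)]
fn zip2_sketch() {
    unsafe {
        let a: uint8x8_t = transmute([0u8, 1, 2, 3, 4, 5, 6, 7]);
        let b: uint8x8_t = transmute([10u8, 11, 12, 13, 14, 15, 16, 17]);
        let r: [u8; 8] = transmute(vzip2_u8(a, b));
        assert_eq!(r, [4, 14, 5, 15, 6, 16, 7, 17]);
    }
}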