diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs index 30512e11bb..4af66039b8 100644 --- a/crates/core_arch/src/aarch64/neon/generated.rs +++ b/crates/core_arch/src/aarch64/neon/generated.rs @@ -27,9 +27,9 @@ pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32cx" )] - fn ___crc32cd(crc: i32, data: i64) -> i32; + fn ___crc32cd(crc: u32, data: u64) -> u32; } - ___crc32cd(crc.as_signed(), data.as_signed()).as_unsigned() + ___crc32cd(crc, data) } #[doc = "CRC32 single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"] @@ -46,9 +46,9 @@ pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crc32x" )] - fn ___crc32d(crc: i32, data: i64) -> i32; + fn ___crc32d(crc: u32, data: u64) -> u32; } - ___crc32d(crc.as_signed(), data.as_signed()).as_unsigned() + ___crc32d(crc, data) } #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"] @@ -507,9 +507,9 @@ pub unsafe fn vaddlv_u16(a: uint16x4_t) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16" )] - fn _vaddlv_u16(a: int16x4_t) -> i32; + fn _vaddlv_u16(a: uint16x4_t) -> u32; } - _vaddlv_u16(a.as_signed()).as_unsigned() + _vaddlv_u16(a) } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"] @@ -525,9 +525,9 @@ pub unsafe fn vaddlvq_u16(a: uint16x8_t) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16" )] - fn _vaddlvq_u16(a: int16x8_t) -> i32; + fn _vaddlvq_u16(a: uint16x8_t) -> u32; } - _vaddlvq_u16(a.as_signed()).as_unsigned() + _vaddlvq_u16(a) } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"] @@ -543,9 +543,9 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32" )] - fn _vaddlvq_u32(a: int32x4_t) -> i64; + fn _vaddlvq_u32(a: uint32x4_t) -> u64; } - _vaddlvq_u32(a.as_signed()).as_unsigned() + _vaddlvq_u32(a) } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"] @@ -561,9 +561,9 @@ pub unsafe fn vaddlv_u32(a: uint32x2_t) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32" )] - fn _vaddlv_u32(a: int32x2_t) -> i64; + fn _vaddlv_u32(a: uint32x2_t) -> u64; } - _vaddlv_u32(a.as_signed()).as_unsigned() + _vaddlv_u32(a) } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"] @@ -579,9 +579,9 @@ pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8" )] - fn _vaddlv_u8(a: int8x8_t) -> i32; + fn _vaddlv_u8(a: uint8x8_t) -> i32; } - _vaddlv_u8(a.as_signed()).as_unsigned() as u16 + _vaddlv_u8(a) as u16 } #[doc = "Unsigned Add Long across 
Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"] @@ -597,9 +597,9 @@ pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8" )] - fn _vaddlvq_u8(a: int8x16_t) -> i32; + fn _vaddlvq_u8(a: uint8x16_t) -> i32; } - _vaddlvq_u8(a.as_signed()).as_unsigned() as u16 + _vaddlvq_u8(a) as u16 } #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] @@ -777,9 +777,9 @@ pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v2i32" )] - fn _vaddv_u32(a: int32x2_t) -> i32; + fn _vaddv_u32(a: uint32x2_t) -> u32; } - _vaddv_u32(a.as_signed()).as_unsigned() + _vaddv_u32(a) } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"] @@ -795,9 +795,9 @@ pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v8i8" )] - fn _vaddv_u8(a: int8x8_t) -> i8; + fn _vaddv_u8(a: uint8x8_t) -> u8; } - _vaddv_u8(a.as_signed()).as_unsigned() + _vaddv_u8(a) } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"] @@ -813,9 +813,9 @@ pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v16i8" )] - fn _vaddvq_u8(a: int8x16_t) -> i8; + fn _vaddvq_u8(a: uint8x16_t) -> u8; } - _vaddvq_u8(a.as_signed()).as_unsigned() + _vaddvq_u8(a) } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"] @@ -831,9 +831,9 @@ pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v4i16" )] - fn _vaddv_u16(a: int16x4_t) -> i16; + fn _vaddv_u16(a: uint16x4_t) -> u16; } - _vaddv_u16(a.as_signed()).as_unsigned() + _vaddv_u16(a) } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"] @@ -849,9 +849,9 @@ pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v8i16" )] - fn _vaddvq_u16(a: int16x8_t) -> i16; + fn _vaddvq_u16(a: uint16x8_t) -> u16; } - _vaddvq_u16(a.as_signed()).as_unsigned() + _vaddvq_u16(a) } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"] @@ -867,9 +867,9 @@ pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i32.v4i32" )] - fn _vaddvq_u32(a: int32x4_t) -> i32; + fn _vaddvq_u32(a: uint32x4_t) -> u32; } - _vaddvq_u32(a.as_signed()).as_unsigned() + _vaddvq_u32(a) } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"] @@ -903,9 +903,9 @@ pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uaddv.i64.v2i64" )] - fn _vaddvq_u64(a: int64x2_t) -> i64; + fn 
_vaddvq_u64(a: uint64x2_t) -> u64; } - _vaddvq_u64(a.as_signed()).as_unsigned() + _vaddvq_u64(a) } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"] @@ -993,9 +993,9 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v16i8" )] - fn _vbcaxq_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t; } - _vbcaxq_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vbcaxq_u8(a, b, c) } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] @@ -1011,9 +1011,9 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v8i16" )] - fn _vbcaxq_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t; } - _vbcaxq_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vbcaxq_u16(a, b, c) } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] @@ -1029,9 +1029,9 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v4i32" )] - fn _vbcaxq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; } - _vbcaxq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vbcaxq_u32(a, b, c) } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] @@ -1047,9 +1047,9 @@ pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.bcaxu.v2i64" )] - fn _vbcaxq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t; } - _vbcaxq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vbcaxq_u64(a, b, c) } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"] @@ -1249,9 +1249,9 @@ pub unsafe fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v1i64.v1f64" )] - fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> int64x1_t; + fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; } - _vcage_f64(a, b).as_unsigned() + _vcage_f64(a, b) } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"] @@ -1267,9 +1267,9 @@ pub unsafe fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v2i64.v2f64" )] - fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; + fn _vcageq_f64(a: float64x2_t, b: 
float64x2_t) -> uint64x2_t; } - _vcageq_f64(a, b).as_unsigned() + _vcageq_f64(a, b) } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"] @@ -1285,9 +1285,9 @@ pub unsafe fn vcaged_f64(a: f64, b: f64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.i64.f64" )] - fn _vcaged_f64(a: f64, b: f64) -> i64; + fn _vcaged_f64(a: f64, b: f64) -> u64; } - _vcaged_f64(a, b).as_unsigned() + _vcaged_f64(a, b) } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"] @@ -1303,9 +1303,9 @@ pub unsafe fn vcages_f32(a: f32, b: f32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.i32.f32" )] - fn _vcages_f32(a: f32, b: f32) -> i32; + fn _vcages_f32(a: f32, b: f32) -> u32; } - _vcages_f32(a, b).as_unsigned() + _vcages_f32(a, b) } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"] @@ -1323,7 +1323,7 @@ pub unsafe fn vcageh_f16(a: f16, b: f16) -> u16 { )] fn _vcageh_f16(a: f16, b: f16) -> i32; } - _vcageh_f16(a, b).as_unsigned() as u16 + _vcageh_f16(a, b) as u16 } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"] @@ -1339,9 +1339,9 @@ pub unsafe fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64" )] - fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> int64x1_t; + fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; } - _vcagt_f64(a, b).as_unsigned() + _vcagt_f64(a, b) } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"] @@ -1357,9 +1357,9 @@ pub unsafe fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64" )] - fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> int64x2_t; + fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; } - _vcagtq_f64(a, b).as_unsigned() + _vcagtq_f64(a, b) } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"] @@ -1375,9 +1375,9 @@ pub unsafe fn vcagtd_f64(a: f64, b: f64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.i64.f64" )] - fn _vcagtd_f64(a: f64, b: f64) -> i64; + fn _vcagtd_f64(a: f64, b: f64) -> u64; } - _vcagtd_f64(a, b).as_unsigned() + _vcagtd_f64(a, b) } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"] @@ -1393,9 +1393,9 @@ pub unsafe fn vcagts_f32(a: f32, b: f32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.i32.f32" )] - fn _vcagts_f32(a: f32, b: f32) -> i32; + fn _vcagts_f32(a: f32, b: f32) -> u32; } - _vcagts_f32(a, b).as_unsigned() + _vcagts_f32(a, b) } #[doc = "Floating-point absolute 
compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"] @@ -1413,7 +1413,7 @@ pub unsafe fn vcagth_f16(a: f16, b: f16) -> u16 { )] fn _vcagth_f16(a: f16, b: f16) -> i32; } - _vcagth_f16(a, b).as_unsigned() as u16 + _vcagth_f16(a, b) as u16 } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"] @@ -7799,9 +7799,9 @@ pub unsafe fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64" )] - fn _vcvt_n_f64_u64(a: int64x1_t, n: i32) -> float64x1_t; + fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t; } - _vcvt_n_f64_u64(a.as_signed(), N) + _vcvt_n_f64_u64(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] @@ -7819,9 +7819,9 @@ pub unsafe fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64" )] - fn _vcvtq_n_f64_u64(a: int64x2_t, n: i32) -> float64x2_t; + fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t; } - _vcvtq_n_f64_u64(a.as_signed(), N) + _vcvtq_n_f64_u64(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"] @@ -7879,9 +7879,9 @@ pub unsafe fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64" )] - fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> int64x1_t; + fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t; } - _vcvt_n_u64_f64(a, N).as_unsigned() + _vcvt_n_u64_f64(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] @@ -7899,9 +7899,9 @@ pub unsafe fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64" )] - fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> int64x2_t; + fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t; } - _vcvtq_n_u64_f64(a, N).as_unsigned() + _vcvtq_n_u64_f64(a, N) } #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"] @@ -7953,9 +7953,9 @@ pub unsafe fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v1i64.v1f64" )] - fn _vcvt_u64_f64(a: float64x1_t) -> int64x1_t; + fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t; } - _vcvt_u64_f64(a).as_unsigned() + _vcvt_u64_f64(a) } #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] @@ -7971,9 +7971,9 @@ pub unsafe fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v2i64.v2f64" )] - fn _vcvtq_u64_f64(a: float64x2_t) -> int64x2_t; + fn _vcvtq_u64_f64(a: 
float64x2_t) -> uint64x2_t; } - _vcvtq_u64_f64(a).as_unsigned() + _vcvtq_u64_f64(a) } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"] @@ -8097,9 +8097,9 @@ pub unsafe fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16" )] - fn _vcvta_u16_f16(a: float16x4_t) -> int16x4_t; + fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t; } - _vcvta_u16_f16(a).as_unsigned() + _vcvta_u16_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"] @@ -8115,9 +8115,9 @@ pub unsafe fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16" )] - fn _vcvtaq_u16_f16(a: float16x8_t) -> int16x8_t; + fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t; } - _vcvtaq_u16_f16(a).as_unsigned() + _vcvtaq_u16_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] @@ -8133,9 +8133,9 @@ pub unsafe fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32" )] - fn _vcvta_u32_f32(a: float32x2_t) -> int32x2_t; + fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t; } - _vcvta_u32_f32(a).as_unsigned() + _vcvta_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] @@ -8151,9 +8151,9 @@ pub unsafe fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32" )] - fn _vcvtaq_u32_f32(a: float32x4_t) -> int32x4_t; + fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t; } - _vcvtaq_u32_f32(a).as_unsigned() + _vcvtaq_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"] @@ -8169,9 +8169,9 @@ pub unsafe fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64" )] - fn _vcvta_u64_f64(a: float64x1_t) -> int64x1_t; + fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t; } - _vcvta_u64_f64(a).as_unsigned() + _vcvta_u64_f64(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] @@ -8187,9 +8187,9 @@ pub unsafe fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64" )] - fn _vcvtaq_u64_f64(a: float64x2_t) -> int64x2_t; + fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t; } - _vcvtaq_u64_f64(a).as_unsigned() + _vcvtaq_u64_f64(a) } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"] @@ -8263,9 +8263,9 @@ pub unsafe fn vcvtah_u32_f16(a: f16) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i32.f16" )] - fn _vcvtah_u32_f16(a: f16) -> i32; + fn _vcvtah_u32_f16(a: f16) -> u32; } - _vcvtah_u32_f16(a).as_unsigned() + _vcvtah_u32_f16(a) } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"] @@ -8281,9 +8281,9 @@ pub unsafe fn vcvtah_u64_f16(a: f16) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i64.f16" )] - fn _vcvtah_u64_f16(a: f16) -> i64; + fn _vcvtah_u64_f16(a: f16) -> u64; } - _vcvtah_u64_f16(a).as_unsigned() + _vcvtah_u64_f16(a) } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"] @@ -8335,9 +8335,9 @@ pub unsafe fn vcvtas_u32_f32(a: f32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i32.f32" )] - fn _vcvtas_u32_f32(a: f32) -> i32; + fn _vcvtas_u32_f32(a: f32) -> u32; } - _vcvtas_u32_f32(a).as_unsigned() + _vcvtas_u32_f32(a) } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"] @@ -8353,9 +8353,9 @@ pub unsafe fn vcvtad_u64_f64(a: f64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtau.i64.f64" )] - fn _vcvtad_u64_f64(a: f64) -> i64; + fn _vcvtad_u64_f64(a: f64) -> u64; } - _vcvtad_u64_f64(a).as_unsigned() + _vcvtad_u64_f64(a) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"] @@ -8527,9 +8527,9 @@ pub unsafe fn vcvth_n_f16_u32(a: u32) -> f16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32" )] - fn _vcvth_n_f16_u32(a: i32, n: i32) -> f16; + fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16; } - _vcvth_n_f16_u32(a.as_signed(), N) + _vcvth_n_f16_u32(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"] @@ -8547,9 +8547,9 @@ pub unsafe fn vcvth_n_f16_u64(a: u64) -> f16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64" )] - fn _vcvth_n_f16_u64(a: i64, n: i32) -> f16; + fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16; } - _vcvth_n_f16_u64(a.as_signed(), N) + _vcvth_n_f16_u64(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"] @@ -8633,9 +8633,9 @@ pub unsafe fn vcvth_n_u32_f16(a: f16) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16" )] - fn _vcvth_n_u32_f16(a: f16, n: i32) -> i32; + fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32; } - _vcvth_n_u32_f16(a, N).as_unsigned() + _vcvth_n_u32_f16(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"] @@ -8653,9 +8653,9 @@ pub unsafe fn vcvth_n_u64_f16(a: f16) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16" )] - fn _vcvth_n_u64_f16(a: f16, n: i32) -> i64; + fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64; } - _vcvth_n_u64_f16(a, N).as_unsigned() + _vcvth_n_u64_f16(a, N) } #[doc = "Floating-point convert to signed fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"] @@ -8845,9 +8845,9 @@ pub unsafe fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16" )] - fn _vcvtm_u16_f16(a: float16x4_t) -> int16x4_t; + fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t; } - _vcvtm_u16_f16(a).as_unsigned() + _vcvtm_u16_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"] @@ -8863,9 +8863,9 @@ pub unsafe fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16" )] - fn _vcvtmq_u16_f16(a: float16x8_t) -> int16x8_t; + fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t; } - _vcvtmq_u16_f16(a).as_unsigned() + _vcvtmq_u16_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] @@ -8881,9 +8881,9 @@ pub unsafe fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32" )] - fn _vcvtm_u32_f32(a: float32x2_t) -> int32x2_t; + fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t; } - _vcvtm_u32_f32(a).as_unsigned() + _vcvtm_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] @@ -8899,9 +8899,9 @@ pub unsafe fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32" )] - fn _vcvtmq_u32_f32(a: float32x4_t) -> int32x4_t; + fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t; } - _vcvtmq_u32_f32(a).as_unsigned() + _vcvtmq_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"] @@ -8917,9 +8917,9 @@ pub unsafe fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64" )] - fn _vcvtm_u64_f64(a: float64x1_t) -> int64x1_t; + fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t; } - _vcvtm_u64_f64(a).as_unsigned() + _vcvtm_u64_f64(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] @@ -8935,9 +8935,9 @@ pub unsafe fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = 
"llvm.aarch64.neon.fcvtmu.v2i64.v2f64" )] - fn _vcvtmq_u64_f64(a: float64x2_t) -> int64x2_t; + fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t; } - _vcvtmq_u64_f64(a).as_unsigned() + _vcvtmq_u64_f64(a) } #[doc = "Floating-point convert to integer, rounding towards minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"] @@ -9011,9 +9011,9 @@ pub unsafe fn vcvtmh_u32_f16(a: f16) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i32.f16" )] - fn _vcvtmh_u32_f16(a: f16) -> i32; + fn _vcvtmh_u32_f16(a: f16) -> u32; } - _vcvtmh_u32_f16(a).as_unsigned() + _vcvtmh_u32_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"] @@ -9029,9 +9029,9 @@ pub unsafe fn vcvtmh_u64_f16(a: f16) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i64.f16" )] - fn _vcvtmh_u64_f16(a: f16) -> i64; + fn _vcvtmh_u64_f16(a: f16) -> u64; } - _vcvtmh_u64_f16(a).as_unsigned() + _vcvtmh_u64_f16(a) } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"] @@ -9083,9 +9083,9 @@ pub unsafe fn vcvtms_u32_f32(a: f32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i32.f32" )] - fn _vcvtms_u32_f32(a: f32) -> i32; + fn _vcvtms_u32_f32(a: f32) -> u32; } - _vcvtms_u32_f32(a).as_unsigned() + _vcvtms_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"] @@ -9101,9 +9101,9 @@ pub unsafe fn vcvtmd_u64_f64(a: f64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtmu.i64.f64" )] - fn _vcvtmd_u64_f64(a: f64) -> i64; + fn _vcvtmd_u64_f64(a: f64) -> u64; } - _vcvtmd_u64_f64(a).as_unsigned() + _vcvtmd_u64_f64(a) } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"] @@ -9227,9 +9227,9 @@ pub unsafe fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16" )] - fn _vcvtn_u16_f16(a: float16x4_t) -> int16x4_t; + fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t; } - _vcvtn_u16_f16(a).as_unsigned() + _vcvtn_u16_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"] @@ -9245,9 +9245,9 @@ pub unsafe fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16" )] - fn _vcvtnq_u16_f16(a: float16x8_t) -> int16x8_t; + fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t; } - _vcvtnq_u16_f16(a).as_unsigned() + _vcvtnq_u16_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] @@ -9263,9 +9263,9 @@ pub unsafe fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32" )] - fn _vcvtn_u32_f32(a: float32x2_t) -> int32x2_t; + fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t; } - _vcvtn_u32_f32(a).as_unsigned() + _vcvtn_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] @@ -9281,9 +9281,9 @@ pub unsafe fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32" )] - fn _vcvtnq_u32_f32(a: float32x4_t) -> int32x4_t; + fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t; } - _vcvtnq_u32_f32(a).as_unsigned() + _vcvtnq_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"] @@ -9299,9 +9299,9 @@ pub unsafe fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64" )] - fn _vcvtn_u64_f64(a: float64x1_t) -> int64x1_t; + fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t; } - _vcvtn_u64_f64(a).as_unsigned() + _vcvtn_u64_f64(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] @@ -9317,9 +9317,9 @@ pub unsafe fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64" )] - fn _vcvtnq_u64_f64(a: float64x2_t) -> int64x2_t; + fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t; } - _vcvtnq_u64_f64(a).as_unsigned() + _vcvtnq_u64_f64(a) } #[doc = "Floating-point convert to integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"] @@ -9393,9 +9393,9 @@ pub unsafe fn vcvtnh_u32_f16(a: f16) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i32.f16" )] - fn _vcvtnh_u32_f16(a: f16) -> i32; + fn _vcvtnh_u32_f16(a: f16) -> u32; } - _vcvtnh_u32_f16(a).as_unsigned() + _vcvtnh_u32_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"] @@ -9411,9 +9411,9 @@ pub unsafe fn vcvtnh_u64_f16(a: f16) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i64.f16" )] - fn _vcvtnh_u64_f16(a: f16) -> i64; + fn _vcvtnh_u64_f16(a: f16) -> u64; } - _vcvtnh_u64_f16(a).as_unsigned() + _vcvtnh_u64_f16(a) } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"] @@ -9465,9 +9465,9 @@ pub unsafe fn vcvtns_u32_f32(a: f32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i32.f32" )] - fn 
_vcvtns_u32_f32(a: f32) -> i32; + fn _vcvtns_u32_f32(a: f32) -> u32; } - _vcvtns_u32_f32(a).as_unsigned() + _vcvtns_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"] @@ -9483,9 +9483,9 @@ pub unsafe fn vcvtnd_u64_f64(a: f64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtnu.i64.f64" )] - fn _vcvtnd_u64_f64(a: f64) -> i64; + fn _vcvtnd_u64_f64(a: f64) -> u64; } - _vcvtnd_u64_f64(a).as_unsigned() + _vcvtnd_u64_f64(a) } #[doc = "Floating-point convert to signed integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"] @@ -9609,9 +9609,9 @@ pub unsafe fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16" )] - fn _vcvtp_u16_f16(a: float16x4_t) -> int16x4_t; + fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t; } - _vcvtp_u16_f16(a).as_unsigned() + _vcvtp_u16_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"] @@ -9627,9 +9627,9 @@ pub unsafe fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16" )] - fn _vcvtpq_u16_f16(a: float16x8_t) -> int16x8_t; + fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t; } - _vcvtpq_u16_f16(a).as_unsigned() + _vcvtpq_u16_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] @@ -9645,9 +9645,9 @@ pub unsafe fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32" )] - fn _vcvtp_u32_f32(a: float32x2_t) -> int32x2_t; + fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t; } - _vcvtp_u32_f32(a).as_unsigned() + _vcvtp_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] @@ -9663,9 +9663,9 @@ pub unsafe fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32" )] - fn _vcvtpq_u32_f32(a: float32x4_t) -> int32x4_t; + fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t; } - _vcvtpq_u32_f32(a).as_unsigned() + _vcvtpq_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"] @@ -9681,9 +9681,9 @@ pub unsafe fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64" )] - fn _vcvtp_u64_f64(a: float64x1_t) -> int64x1_t; + fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t; } - _vcvtp_u64_f64(a).as_unsigned() + _vcvtp_u64_f64(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] @@ -9699,9 +9699,9 @@ pub unsafe fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64" )] - fn _vcvtpq_u64_f64(a: float64x2_t) -> int64x2_t; + fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t; } - _vcvtpq_u64_f64(a).as_unsigned() + _vcvtpq_u64_f64(a) } #[doc = "Floating-point convert to integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"] @@ -9775,9 +9775,9 @@ pub unsafe fn vcvtph_u32_f16(a: f16) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i32.f16" )] - fn _vcvtph_u32_f16(a: f16) -> i32; + fn _vcvtph_u32_f16(a: f16) -> u32; } - _vcvtph_u32_f16(a).as_unsigned() + _vcvtph_u32_f16(a) } #[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"] @@ -9793,9 +9793,9 @@ pub unsafe fn vcvtph_u64_f16(a: f16) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i64.f16" )] - fn _vcvtph_u64_f16(a: f16) -> i64; + fn _vcvtph_u64_f16(a: f16) -> u64; } - _vcvtph_u64_f16(a).as_unsigned() + _vcvtph_u64_f16(a) } #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"] @@ -9847,9 +9847,9 @@ pub unsafe fn vcvtps_u32_f32(a: f32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i32.f32" )] - fn _vcvtps_u32_f32(a: f32) -> i32; + fn _vcvtps_u32_f32(a: f32) -> u32; } - _vcvtps_u32_f32(a).as_unsigned() + _vcvtps_u32_f32(a) } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"] @@ -9865,9 +9865,9 @@ pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.fcvtpu.i64.f64" )] - fn _vcvtpd_u64_f64(a: f64) -> i64; + fn _vcvtpd_u64_f64(a: f64) -> u64; } - _vcvtpd_u64_f64(a).as_unsigned() + _vcvtpd_u64_f64(a) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"] @@ -9947,9 +9947,9 @@ pub unsafe fn vcvts_n_f32_u32(a: u32) -> f32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32" )] - fn _vcvts_n_f32_u32(a: i32, n: i32) -> f32; + fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32; } - _vcvts_n_f32_u32(a.as_signed(), N) + _vcvts_n_f32_u32(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"] @@ -9967,9 +9967,9 @@ pub unsafe fn vcvtd_n_f64_u64(a: u64) -> f64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64" )] - fn _vcvtd_n_f64_u64(a: i64, n: i32) -> f64; + fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64; } - _vcvtd_n_f64_u64(a.as_signed(), N) + _vcvtd_n_f64_u64(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"] @@ -10027,9 +10027,9 @@ pub unsafe fn vcvts_n_u32_f32(a: f32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32" )] - fn _vcvts_n_u32_f32(a: f32, n: i32) -> i32; + fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32; } - _vcvts_n_u32_f32(a, N).as_unsigned() + _vcvts_n_u32_f32(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"] @@ -10047,9 +10047,9 @@ pub unsafe fn vcvtd_n_u64_f64(a: f64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64" )] - fn _vcvtd_n_u64_f64(a: f64, n: i32) -> i64; + fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64; } - _vcvtd_n_u64_f64(a, N).as_unsigned() + _vcvtd_n_u64_f64(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"] @@ -10816,9 +10816,9 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v16i8" )] - fn _veor3q_u8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t; } - _veor3q_u8(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _veor3q_u8(a, b, c) } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] @@ -10834,9 +10834,9 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v8i16" )] - fn _veor3q_u16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t; } - _veor3q_u16(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _veor3q_u16(a, b, c) } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] @@ -10852,9 +10852,9 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v4i32" )] - fn _veor3q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; } - _veor3q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _veor3q_u32(a, b, c) } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] @@ -10870,9 +10870,9 @@ pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.eor3u.v2i64" )] - fn _veor3q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t; } - _veor3q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _veor3q_u64(a, b, c) } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] @@ -14207,9 +14207,9 @@ pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i8.v8i8" )] - fn _vmaxv_u8(a: int8x8_t) -> i8; + fn _vmaxv_u8(a: uint8x8_t) -> u8; } - _vmaxv_u8(a.as_signed()).as_unsigned() + _vmaxv_u8(a) } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] @@ -14225,9 +14225,9 @@ pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i8.v16i8" )] - fn _vmaxvq_u8(a: int8x16_t) -> i8; + fn _vmaxvq_u8(a: uint8x16_t) -> u8; } - _vmaxvq_u8(a.as_signed()).as_unsigned() + _vmaxvq_u8(a) } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] @@ -14243,9 +14243,9 @@ pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i16.v4i16" )] - fn _vmaxv_u16(a: int16x4_t) -> i16; + fn _vmaxv_u16(a: uint16x4_t) -> u16; } - _vmaxv_u16(a.as_signed()).as_unsigned() + _vmaxv_u16(a) } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] @@ -14261,9 +14261,9 @@ pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i16.v8i16" )] - fn _vmaxvq_u16(a: int16x8_t) -> i16; + fn _vmaxvq_u16(a: uint16x8_t) -> u16; } - _vmaxvq_u16(a.as_signed()).as_unsigned() + _vmaxvq_u16(a) } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] @@ -14279,9 +14279,9 @@ pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i32.v2i32" )] - fn _vmaxv_u32(a: int32x2_t) -> i32; + fn _vmaxv_u32(a: uint32x2_t) -> u32; } - _vmaxv_u32(a.as_signed()).as_unsigned() + _vmaxv_u32(a) } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] @@ -14297,9 +14297,9 @@ pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxv.i32.v4i32" )] - fn _vmaxvq_u32(a: int32x4_t) -> i32; + fn _vmaxvq_u32(a: uint32x4_t) -> u32; } - _vmaxvq_u32(a.as_signed()).as_unsigned() + _vmaxvq_u32(a) } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"] @@ -14711,9 +14711,9 @@ pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i8.v8i8" )] - fn _vminv_u8(a: int8x8_t) -> i8; + fn _vminv_u8(a: uint8x8_t) -> u8; } - _vminv_u8(a.as_signed()).as_unsigned() + _vminv_u8(a) } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] @@ -14729,9 +14729,9 @@ pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i8.v16i8" )] - fn _vminvq_u8(a: int8x16_t) -> i8; + fn _vminvq_u8(a: 
uint8x16_t) -> u8; } - _vminvq_u8(a.as_signed()).as_unsigned() + _vminvq_u8(a) } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] @@ -14747,9 +14747,9 @@ pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i16.v4i16" )] - fn _vminv_u16(a: int16x4_t) -> i16; + fn _vminv_u16(a: uint16x4_t) -> u16; } - _vminv_u16(a.as_signed()).as_unsigned() + _vminv_u16(a) } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] @@ -14765,9 +14765,9 @@ pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i16.v8i16" )] - fn _vminvq_u16(a: int16x8_t) -> i16; + fn _vminvq_u16(a: uint16x8_t) -> u16; } - _vminvq_u16(a.as_signed()).as_unsigned() + _vminvq_u16(a) } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] @@ -14783,9 +14783,9 @@ pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i32.v2i32" )] - fn _vminv_u32(a: int32x2_t) -> i32; + fn _vminv_u32(a: uint32x2_t) -> u32; } - _vminv_u32(a.as_signed()).as_unsigned() + _vminv_u32(a) } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] @@ -14801,9 +14801,9 @@ pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminv.i32.v4i32" )] - fn _vminvq_u32(a: int32x4_t) -> i32; + fn _vminvq_u32(a: uint32x4_t) -> u32; } - _vminvq_u32(a.as_signed()).as_unsigned() + _vminvq_u32(a) } #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"] @@ -17351,9 +17351,9 @@ pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v16i8" )] - fn _vpmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vpmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vpmaxq_u8(a, b) } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] @@ -17369,9 +17369,9 @@ pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v8i16" )] - fn _vpmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vpmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vpmaxq_u16(a, b) } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] @@ -17387,9 +17387,9 @@ pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umaxp.v4i32" )] - fn _vpmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - 
_vpmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vpmaxq_u32(a, b) } #[doc = "Floating-point maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] @@ -17693,9 +17693,9 @@ pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v16i8" )] - fn _vpminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vpminq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vpminq_u8(a, b) } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] @@ -17711,9 +17711,9 @@ pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v8i16" )] - fn _vpminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vpminq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vpminq_u16(a, b) } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] @@ -17729,9 +17729,9 @@ pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uminp.v4i32" )] - fn _vpminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vpminq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vpminq_u32(a, b) } #[doc = "Floating-point minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] @@ -17965,9 +17965,9 @@ pub unsafe fn vqadds_u32(a: u32, b: u32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.i32" )] - fn _vqadds_u32(a: i32, b: i32) -> i32; + fn _vqadds_u32(a: u32, b: u32) -> u32; } - _vqadds_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vqadds_u32(a, b) } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"] @@ -17983,9 +17983,9 @@ pub unsafe fn vqaddd_u64(a: u64, b: u64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqadd.i64" )] - fn _vqaddd_u64(a: i64, b: i64) -> i64; + fn _vqaddd_u64(a: u64, b: u64) -> u64; } - _vqaddd_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vqaddd_u64(a, b) } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] @@ -18898,9 +18898,9 @@ pub unsafe fn vqmovnd_u64(a: u64) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64" )] - fn _vqmovnd_u64(a: i64) -> i32; + fn _vqmovnd_u64(a: u64) -> u32; } - _vqmovnd_u64(a.as_signed()).as_unsigned() + _vqmovnd_u64(a) } #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"] @@ -19916,9 +19916,9 @@ pub unsafe fn vqrshls_u32(a: u32, b: i32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.i32" )] - fn 
_vqrshls_u32(a: i32, b: i32) -> i32; + fn _vqrshls_u32(a: u32, b: i32) -> u32; } - _vqrshls_u32(a.as_signed(), b).as_unsigned() + _vqrshls_u32(a, b) } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"] @@ -19934,9 +19934,9 @@ pub unsafe fn vqrshld_u64(a: u64, b: i64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.i64" )] - fn _vqrshld_u64(a: i64, b: i64) -> i64; + fn _vqrshld_u64(a: u64, b: i64) -> u64; } - _vqrshld_u64(a.as_signed(), b).as_unsigned() + _vqrshld_u64(a, b) } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] @@ -20401,9 +20401,9 @@ pub unsafe fn vqshld_u64(a: u64, b: i64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.i64" )] - fn _vqshld_u64(a: i64, b: i64) -> i64; + fn _vqshld_u64(a: u64, b: i64) -> u64; } - _vqshld_u64(a.as_signed(), b).as_unsigned() + _vqshld_u64(a, b) } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"] @@ -20579,9 +20579,9 @@ pub unsafe fn vqshrnd_n_u64(a: u64) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.i32" )] - fn _vqshrnd_n_u64(a: i64, n: i32) -> i32; + fn _vqshrnd_n_u64(a: u64, n: i32) -> u32; } - _vqshrnd_n_u64(a.as_signed(), N).as_unsigned() + _vqshrnd_n_u64(a, N) } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"] @@ -20819,9 +20819,9 @@ pub unsafe fn vqsubs_u32(a: u32, b: u32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.i32" )] - fn _vqsubs_u32(a: i32, b: i32) -> i32; + fn _vqsubs_u32(a: u32, b: u32) -> u32; } - _vqsubs_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vqsubs_u32(a, b) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"] @@ -20837,9 +20837,9 @@ pub unsafe fn vqsubd_u64(a: u64, b: u64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqsub.i64" )] - fn _vqsubd_u64(a: i64, b: i64) -> i64; + fn _vqsubd_u64(a: u64, b: u64) -> u64; } - _vqsubd_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vqsubd_u64(a, b) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] @@ -20855,9 +20855,9 @@ unsafe fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl1.v8i8" )] - fn _vqtbl1(a: int8x16_t, b: int8x8_t) -> int8x8_t; + fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t; } - _vqtbl1(a, b.as_signed()) + _vqtbl1(a, b) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] @@ -20873,9 +20873,9 @@ unsafe fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl1.v16i8" )] - fn _vqtbl1q(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t; } - 
_vqtbl1q(a, b.as_signed()) + _vqtbl1q(a, b) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] @@ -20961,9 +20961,9 @@ unsafe fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl2.v8i8" )] - fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: int8x8_t) -> int8x8_t; + fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t; } - _vqtbl2(a, b, c.as_signed()) + _vqtbl2(a, b, c) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] @@ -20979,9 +20979,9 @@ unsafe fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl2.v16i8" )] - fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t; } - _vqtbl2q(a, b, c.as_signed()) + _vqtbl2q(a, b, c) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] @@ -21175,9 +21175,9 @@ unsafe fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8 any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl3.v8i8" )] - fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; + fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t; } - _vqtbl3(a, b, c, d.as_signed()) + _vqtbl3(a, b, c, d) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] @@ -21193,9 +21193,9 @@ unsafe fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl3.v16i8" )] - fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; + fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t; } - _vqtbl3q(a, b, c, d.as_signed()) + _vqtbl3q(a, b, c, d) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] @@ -21409,10 +21409,15 @@ unsafe fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbl4.v8i8" )] - fn _vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) - -> int8x8_t; + fn _vqtbl4( + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + e: uint8x8_t, + ) -> int8x8_t; } - _vqtbl4(a, b, c, d, e.as_signed()) + _vqtbl4(a, b, c, d, e) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] @@ -21439,10 +21444,10 @@ unsafe fn vqtbl4q( b: int8x16_t, c: int8x16_t, d: int8x16_t, - e: int8x16_t, + e: uint8x16_t, ) -> int8x16_t; } - _vqtbl4q(a, b, c, d, e.as_signed()) + _vqtbl4q(a, b, c, d, e) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] @@ -21724,9 +21729,9 @@ unsafe fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx1.v8i8" )] - fn _vqtbx1(a: int8x8_t, 
b: int8x16_t, c: int8x8_t) -> int8x8_t; + fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t; } - _vqtbx1(a, b, c.as_signed()) + _vqtbx1(a, b, c) } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] @@ -21742,9 +21747,9 @@ unsafe fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx1.v16i8" )] - fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t; } - _vqtbx1q(a, b, c.as_signed()) + _vqtbx1q(a, b, c) } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] @@ -21830,9 +21835,9 @@ unsafe fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx2.v8i8" )] - fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x8_t) -> int8x8_t; + fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t; } - _vqtbx2(a, b, c, d.as_signed()) + _vqtbx2(a, b, c, d) } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] @@ -21848,9 +21853,9 @@ unsafe fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> in any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx2.v16i8" )] - fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t) -> int8x16_t; + fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t; } - _vqtbx2q(a, b, c, d.as_signed()) + _vqtbx2q(a, b, c, d) } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] @@ -22048,9 +22053,10 @@ unsafe fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8 any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.tbx3.v8i8" )] - fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: int8x8_t) -> int8x8_t; + fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) + -> int8x8_t; } - _vqtbx3(a, b, c, d, e.as_signed()) + _vqtbx3(a, b, c, d, e) } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] @@ -22077,10 +22083,10 @@ unsafe fn vqtbx3q( b: int8x16_t, c: int8x16_t, d: int8x16_t, - e: int8x16_t, + e: uint8x16_t, ) -> int8x16_t; } - _vqtbx3q(a, b, c, d, e.as_signed()) + _vqtbx3q(a, b, c, d, e) } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] @@ -22359,10 +22365,10 @@ unsafe fn vqtbx4( c: int8x16_t, d: int8x16_t, e: int8x16_t, - f: int8x8_t, + f: uint8x8_t, ) -> int8x8_t; } - _vqtbx4(a, b, c, d, e, f.as_signed()) + _vqtbx4(a, b, c, d, e, f) } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] @@ -22391,10 +22397,10 @@ unsafe fn vqtbx4q( c: int8x16_t, d: int8x16_t, e: int8x16_t, - f: int8x16_t, + f: uint8x16_t, ) -> int8x16_t; } - _vqtbx4q(a, b, c, d, e, f.as_signed()) + _vqtbx4q(a, b, c, d, e, f) } #[doc = "Extended table look-up"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] @@ -22688,9 +22694,9 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.rax1" )] - fn _vrax1q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; } - _vrax1q_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vrax1q_u64(a, b) } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] @@ -25776,9 +25782,9 @@ pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.i64" )] - fn _vrshld_u64(a: i64, b: i64) -> i64; + fn _vrshld_u64(a: u64, b: i64) -> u64; } - _vrshld_u64(a.as_signed(), b).as_unsigned() + _vrshld_u64(a, b) } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"] @@ -26213,9 +26219,9 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h2" )] - fn _vsha512h2q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t; } - _vsha512h2q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vsha512h2q_u64(a, b, c) } #[doc = "SHA512 hash update part 1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] @@ -26231,9 +26237,9 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512h" )] - fn _vsha512hq_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t; } - _vsha512hq_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vsha512hq_u64(a, b, c) } #[doc = "SHA512 schedule update 0"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] @@ -26249,9 +26255,9 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su0" )] - fn _vsha512su0q_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; } - _vsha512su0q_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vsha512su0q_u64(a, b) } #[doc = "SHA512 schedule update 1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] @@ -26267,9 +26273,9 @@ pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> ui any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sha512su1" )] - fn _vsha512su1q_u64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t; } - _vsha512su1q_u64(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vsha512su1q_u64(a, b, c) } #[doc = "Signed Shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"] @@ -26845,9 +26851,9 @@ pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw1" )] - fn _vsm3partw1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; } - _vsm3partw1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vsm3partw1q_u32(a, b, c) } #[doc = "SM3PARTW2"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] @@ -26863,9 +26869,9 @@ pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3partw2" )] - fn _vsm3partw2q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; } - _vsm3partw2q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vsm3partw2q_u32(a, b, c) } #[doc = "SM3SS1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] @@ -26881,9 +26887,9 @@ pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint3 any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3ss1" )] - fn _vsm3ss1q_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; } - _vsm3ss1q_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vsm3ss1q_u32(a, b, c) } #[doc = "SM3TT1A"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] @@ -26905,9 +26911,9 @@ pub unsafe fn vsm3tt1aq_u32( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt1a" )] - fn _vsm3tt1aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; } - _vsm3tt1aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() + _vsm3tt1aq_u32(a, b, c, IMM2 as i64) } #[doc = "SM3TT1B"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] @@ -26929,9 +26935,9 @@ pub unsafe fn vsm3tt1bq_u32( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt1b" )] - fn _vsm3tt1bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; } - _vsm3tt1bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() + _vsm3tt1bq_u32(a, b, c, IMM2 as i64) } #[doc = "SM3TT2A"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] @@ -26953,9 +26959,9 @@ pub unsafe fn vsm3tt2aq_u32( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt2a" )] - fn _vsm3tt2aq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; } - _vsm3tt2aq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() + _vsm3tt2aq_u32(a, b, c, IMM2 as 
i64) } #[doc = "SM3TT2B"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] @@ -26977,9 +26983,9 @@ pub unsafe fn vsm3tt2bq_u32( any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm3tt2b" )] - fn _vsm3tt2bq_u32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64) -> int32x4_t; + fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; } - _vsm3tt2bq_u32(a.as_signed(), b.as_signed(), c.as_signed(), IMM2 as i64).as_unsigned() + _vsm3tt2bq_u32(a, b, c, IMM2 as i64) } #[doc = "SM4 key"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] @@ -26995,9 +27001,9 @@ pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4ekey" )] - fn _vsm4ekeyq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vsm4ekeyq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vsm4ekeyq_u32(a, b) } #[doc = "SM4 encode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] @@ -27013,9 +27019,9 @@ pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.sm4e" )] - fn _vsm4eq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vsm4eq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vsm4eq_u32(a, b) } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] @@ -27031,9 +27037,9 @@ pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v8i8" )] - fn _vsqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; } - _vsqadd_u8(a.as_signed(), b).as_unsigned() + _vsqadd_u8(a, b) } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] @@ -27049,9 +27055,9 @@ pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v16i8" )] - fn _vsqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; } - _vsqaddq_u8(a.as_signed(), b).as_unsigned() + _vsqaddq_u8(a, b) } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] @@ -27067,9 +27073,9 @@ pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v4i16" )] - fn _vsqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; } - _vsqadd_u16(a.as_signed(), b).as_unsigned() + _vsqadd_u16(a, b) } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] @@ -27085,9 +27091,9 @@ pub unsafe fn 
vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v8i16" )] - fn _vsqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; } - _vsqaddq_u16(a.as_signed(), b).as_unsigned() + _vsqaddq_u16(a, b) } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] @@ -27103,9 +27109,9 @@ pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v2i32" )] - fn _vsqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; } - _vsqadd_u32(a.as_signed(), b).as_unsigned() + _vsqadd_u32(a, b) } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] @@ -27121,9 +27127,9 @@ pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v4i32" )] - fn _vsqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; } - _vsqaddq_u32(a.as_signed(), b).as_unsigned() + _vsqaddq_u32(a, b) } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"] @@ -27139,9 +27145,9 @@ pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v1i64" )] - fn _vsqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; } - _vsqadd_u64(a.as_signed(), b).as_unsigned() + _vsqadd_u64(a, b) } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] @@ -27157,9 +27163,9 @@ pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.v2i64" )] - fn _vsqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; } - _vsqaddq_u64(a.as_signed(), b).as_unsigned() + _vsqaddq_u64(a, b) } #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"] @@ -27197,9 +27203,9 @@ pub unsafe fn vsqaddd_u64(a: u64, b: i64) -> u64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.i64" )] - fn _vsqaddd_u64(a: i64, b: i64) -> i64; + fn _vsqaddd_u64(a: u64, b: i64) -> u64; } - _vsqaddd_u64(a.as_signed(), b).as_unsigned() + _vsqaddd_u64(a, b) } #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"] @@ -27215,9 +27221,9 @@ pub unsafe fn vsqadds_u32(a: u32, b: i32) -> u32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.usqadd.i32" )] - fn _vsqadds_u32(a: i32, b: i32) -> i32; + fn _vsqadds_u32(a: u32, b: i32) -> u32; } - 
_vsqadds_u32(a.as_signed(), b).as_unsigned() + _vsqadds_u32(a, b) } #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"] @@ -30488,9 +30494,9 @@ pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v8i8" )] - fn _vuqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t; } - _vuqadd_s8(a, b.as_signed()) + _vuqadd_s8(a, b) } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] @@ -30506,9 +30512,9 @@ pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v16i8" )] - fn _vuqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t; } - _vuqaddq_s8(a, b.as_signed()) + _vuqaddq_s8(a, b) } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] @@ -30524,9 +30530,9 @@ pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v4i16" )] - fn _vuqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t; } - _vuqadd_s16(a, b.as_signed()) + _vuqadd_s16(a, b) } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] @@ -30542,9 +30548,9 @@ pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v8i16" )] - fn _vuqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t; } - _vuqaddq_s16(a, b.as_signed()) + _vuqaddq_s16(a, b) } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] @@ -30560,9 +30566,9 @@ pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v2i32" )] - fn _vuqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t; } - _vuqadd_s32(a, b.as_signed()) + _vuqadd_s32(a, b) } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] @@ -30578,9 +30584,9 @@ pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v4i32" )] - fn _vuqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t; } - _vuqaddq_s32(a, b.as_signed()) + _vuqaddq_s32(a, b) } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"] @@ -30596,9 +30602,9 @@ pub unsafe fn vuqadd_s64(a: int64x1_t, b: 
uint64x1_t) -> int64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v1i64" )] - fn _vuqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t; } - _vuqadd_s64(a, b.as_signed()) + _vuqadd_s64(a, b) } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] @@ -30614,9 +30620,9 @@ pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.v2i64" )] - fn _vuqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t; } - _vuqaddq_s64(a, b.as_signed()) + _vuqaddq_s64(a, b) } #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] @@ -30654,9 +30660,9 @@ pub unsafe fn vuqaddd_s64(a: i64, b: u64) -> i64 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.i64" )] - fn _vuqaddd_s64(a: i64, b: i64) -> i64; + fn _vuqaddd_s64(a: i64, b: u64) -> i64; } - _vuqaddd_s64(a, b.as_signed()) + _vuqaddd_s64(a, b) } #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] @@ -30672,9 +30678,9 @@ pub unsafe fn vuqadds_s32(a: i32, b: u32) -> i32 { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.suqadd.i32" )] - fn _vuqadds_s32(a: i32, b: i32) -> i32; + fn _vuqadds_s32(a: i32, b: u32) -> i32; } - _vuqadds_s32(a, b.as_signed()) + _vuqadds_s32(a, b) } #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] @@ -31282,9 +31288,9 @@ pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.crypto.xar" )] - fn _vxarq_u64(a: int64x2_t, b: int64x2_t, n: i64) -> int64x2_t; + fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t; } - _vxarq_u64(a.as_signed(), b.as_signed(), IMM6 as i64).as_unsigned() + _vxarq_u64(a, b, IMM6 as i64) } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"] diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index 522388b627..abe7c5c59e 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -35,9 +35,9 @@ pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 { link_name = "llvm.aarch64.crc32b" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32b")] - fn ___crc32b(crc: i32, data: i32) -> i32; + fn ___crc32b(crc: u32, data: u32) -> u32; } - ___crc32b(crc.as_signed(), data.as_signed() as i32).as_unsigned() + ___crc32b(crc, data as u32) } #[doc = "CRC32-C single round checksum for bytes (8 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cb)"] @@ -62,9 +62,9 @@ pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { link_name = "llvm.aarch64.crc32cb" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cb")] - 
fn ___crc32cb(crc: i32, data: i32) -> i32; + fn ___crc32cb(crc: u32, data: u32) -> u32; } - ___crc32cb(crc.as_signed(), data.as_signed() as i32).as_unsigned() + ___crc32cb(crc, data as u32) } #[doc = "CRC32-C single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"] @@ -79,14 +79,13 @@ pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { - let a: i32 = crc as i32; - let b: i32 = (data & 0xFFFFFFFF).as_signed() as i32; - let c: i32 = (data >> 32).as_signed() as i32; + let b: u32 = (data & 0xFFFFFFFF) as u32; + let c: u32 = (data >> 32) as u32; unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] - fn ___crc32cw(crc: i32, data: i32) -> i32; + fn ___crc32cw(crc: u32, data: u32) -> u32; } - ___crc32cw(___crc32cw(a, b), c).as_unsigned() as u32 + ___crc32cw(___crc32cw(crc, b), c) } #[doc = "CRC32-C single round checksum for bytes (16 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32ch)"] @@ -111,9 +110,9 @@ pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 { link_name = "llvm.aarch64.crc32ch" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32ch")] - fn ___crc32ch(crc: i32, data: i32) -> i32; + fn ___crc32ch(crc: u32, data: u32) -> u32; } - ___crc32ch(crc.as_signed(), data.as_signed() as i32).as_unsigned() + ___crc32ch(crc, data as u32) } #[doc = "CRC32-C single round checksum for bytes (32 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cw)"] @@ -138,9 +137,9 @@ pub unsafe fn __crc32cw(crc: u32, data: u32) -> u32 { link_name = "llvm.aarch64.crc32cw" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] - fn ___crc32cw(crc: i32, data: i32) -> i32; + fn ___crc32cw(crc: u32, data: u32) -> u32; } - ___crc32cw(crc.as_signed(), data.as_signed()).as_unsigned() + ___crc32cw(crc, data) } #[doc = "CRC32 single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"] @@ -155,14 +154,13 @@ pub unsafe fn __crc32cw(crc: u32, data: u32) -> u32 { unstable(feature = "stdarch_aarch32_crc32", issue = "125085") )] pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { - let a: i32 = crc as i32; - let b: i32 = (data & 0xFFFFFFFF).as_signed() as i32; - let c: i32 = (data >> 32).as_signed() as i32; + let b: u32 = (data & 0xFFFFFFFF) as u32; + let c: u32 = (data >> 32) as u32; unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] - fn ___crc32w(crc: i32, data: i32) -> i32; + fn ___crc32w(crc: u32, data: u32) -> u32; } - ___crc32w(___crc32w(a, b), c).as_unsigned() + ___crc32w(___crc32w(crc, b), c) } #[doc = "CRC32 single round checksum for bytes (16 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32h)"] @@ -187,9 +185,9 @@ pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 { link_name = "llvm.aarch64.crc32h" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32h")] - fn ___crc32h(crc: i32, data: i32) -> i32; + fn ___crc32h(crc: u32, data: u32) -> u32; } - ___crc32h(crc.as_signed(), data.as_signed() as i32).as_unsigned() + ___crc32h(crc, data as u32) } #[doc = "CRC32 single round checksum for bytes 
(32 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32w)"] @@ -214,9 +212,9 @@ pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { link_name = "llvm.aarch64.crc32w" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] - fn ___crc32w(crc: i32, data: i32) -> i32; + fn ___crc32w(crc: u32, data: u32) -> u32; } - ___crc32w(crc.as_signed(), data.as_signed()).as_unsigned() + ___crc32w(crc, data) } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s8)"] @@ -347,9 +345,9 @@ unsafe fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i16.v8i8")] - fn _priv_vpadal_u8(a: int16x4_t, b: int8x8_t) -> int16x4_t; + fn _priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t; } - _priv_vpadal_u8(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadal_u8(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u8)"] @@ -366,9 +364,9 @@ unsafe fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v8i16.v16i8")] - fn _priv_vpadalq_u8(a: int16x8_t, b: int8x16_t) -> int16x8_t; + fn _priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t; } - _priv_vpadalq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadalq_u8(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u16)"] @@ -385,9 +383,9 @@ unsafe fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i32.v4i16")] - fn _priv_vpadal_u16(a: int32x2_t, b: int16x4_t) -> int32x2_t; + fn _priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t; } - _priv_vpadal_u16(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadal_u16(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u16)"] @@ -404,9 +402,9 @@ unsafe fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v4i32.v8i16")] - fn _priv_vpadalq_u16(a: int32x4_t, b: int16x8_t) -> int32x4_t; + fn _priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t; } - _priv_vpadalq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadalq_u16(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u32)"] @@ -423,9 +421,9 @@ unsafe fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vpadalu.v1i64.v2i32")] - fn _priv_vpadal_u32(a: int64x1_t, b: int32x2_t) -> int64x1_t; + fn _priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t; } - _priv_vpadal_u32(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadal_u32(a, b) } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u32)"] @@ -442,9 +440,9 @@ unsafe fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { unsafe fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadalu.v2i64.v4i32")] - fn _priv_vpadalq_u32(a: int64x2_t, b: int32x4_t) -> int64x2_t; + fn _priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t; } - _priv_vpadalq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _priv_vpadalq_u32(a, b) } #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] @@ -916,9 +914,9 @@ pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { link_name = "llvm.aarch64.neon.uabd.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")] - fn _vabd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vabd_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vabd_u8(a, b) } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] @@ -947,9 +945,9 @@ pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.neon.uabd.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")] - fn _vabdq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vabdq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vabdq_u8(a, b) } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] @@ -978,9 +976,9 @@ pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { link_name = "llvm.aarch64.neon.uabd.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")] - fn _vabd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vabd_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vabd_u16(a, b) } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] @@ -1009,9 +1007,9 @@ pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { link_name = "llvm.aarch64.neon.uabd.v8i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i16")] - fn _vabdq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vabdq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vabdq_u16(a, b) } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] @@ -1040,9 +1038,9 @@ pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.uabd.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vabdu.v2i32")] - fn _vabd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vabd_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vabd_u32(a, b) } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] @@ -1071,9 +1069,9 @@ pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { link_name = "llvm.aarch64.neon.uabd.v4i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")] - fn _vabdq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vabdq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vabdq_u32(a, b) } #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] @@ -1728,9 +1726,9 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.crypto.aesd" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] - fn _vaesdq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; + fn _vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; } - _vaesdq_u8(data.as_signed(), key.as_signed()).as_unsigned() + _vaesdq_u8(data, key) } #[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] @@ -1755,9 +1753,9 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.crypto.aese" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] - fn _vaeseq_u8(data: int8x16_t, key: int8x16_t) -> int8x16_t; + fn _vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; } - _vaeseq_u8(data.as_signed(), key.as_signed()).as_unsigned() + _vaeseq_u8(data, key) } #[doc = "AES inverse mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] @@ -1782,9 +1780,9 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.crypto.aesimc" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] - fn _vaesimcq_u8(data: int8x16_t) -> int8x16_t; + fn _vaesimcq_u8(data: uint8x16_t) -> uint8x16_t; } - _vaesimcq_u8(data.as_signed()).as_unsigned() + _vaesimcq_u8(data) } #[doc = "AES mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] @@ -1809,9 +1807,9 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.crypto.aesmc" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] - fn _vaesmcq_u8(data: int8x16_t) -> int8x16_t; + fn _vaesmcq_u8(data: uint8x16_t) -> uint8x16_t; } - _vaesmcq_u8(data.as_signed()).as_unsigned() + _vaesmcq_u8(data) } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] @@ -2201,9 +2199,9 @@ pub unsafe fn vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v4i16.v4f16" )] - fn _vcage_f16(a: float16x4_t, b: float16x4_t) -> int16x4_t; + fn _vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t; } - _vcage_f16(a, b).as_unsigned() + _vcage_f16(a, b) } #[doc = "Floating-point absolute compare greater than or 
equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f16)"] @@ -2225,9 +2223,9 @@ pub unsafe fn vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v8i16.v8f16" )] - fn _vcageq_f16(a: float16x8_t, b: float16x8_t) -> int16x8_t; + fn _vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t; } - _vcageq_f16(a, b).as_unsigned() + _vcageq_f16(a, b) } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] @@ -2256,9 +2254,9 @@ pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v2i32.v2f32" )] - fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; + fn _vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; } - _vcage_f32(a, b).as_unsigned() + _vcage_f32(a, b) } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] @@ -2287,9 +2285,9 @@ pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facge.v4i32.v4f32" )] - fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + fn _vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; } - _vcageq_f32(a, b).as_unsigned() + _vcageq_f32(a, b) } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f16)"] @@ -2311,9 +2309,9 @@ pub unsafe fn vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v4i16.v4f16" )] - fn _vcagt_f16(a: float16x4_t, b: float16x4_t) -> int16x4_t; + fn _vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t; } - _vcagt_f16(a, b).as_unsigned() + _vcagt_f16(a, b) } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f16)"] @@ -2335,9 +2333,9 @@ pub unsafe fn vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v8i16.v8f16" )] - fn _vcagtq_f16(a: float16x8_t, b: float16x8_t) -> int16x8_t; + fn _vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t; } - _vcagtq_f16(a, b).as_unsigned() + _vcagtq_f16(a, b) } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] @@ -2366,9 +2364,9 @@ pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32" )] - fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> int32x2_t; + fn _vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; } - _vcagt_f32(a, b).as_unsigned() + _vcagt_f32(a, b) } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] @@ -2397,9 +2395,9 @@ pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> 
uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32" )] - fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> int32x4_t; + fn _vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; } - _vcagtq_f32(a, b).as_unsigned() + _vcagtq_f32(a, b) } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f16)"] @@ -6649,9 +6647,9 @@ pub unsafe fn vcvt_n_f16_u16(a: uint16x4_t) -> float16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f16.v4i16" )] - fn _vcvt_n_f16_u16(a: int16x4_t, n: i32) -> float16x4_t; + fn _vcvt_n_f16_u16(a: uint16x4_t, n: i32) -> float16x4_t; } - _vcvt_n_f16_u16(a.as_signed(), N) + _vcvt_n_f16_u16(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f16_u16)"] @@ -6678,9 +6676,9 @@ pub unsafe fn vcvtq_n_f16_u16(a: uint16x8_t) -> float16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v8f16.v8i16" )] - fn _vcvtq_n_f16_u16(a: int16x8_t, n: i32) -> float16x8_t; + fn _vcvtq_n_f16_u16(a: uint16x8_t, n: i32) -> float16x8_t; } - _vcvtq_n_f16_u16(a.as_signed(), N) + _vcvtq_n_f16_u16(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] @@ -6783,9 +6781,9 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32" )] - fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; + fn _vcvt_n_f32_u32(a: uint32x2_t, n: i32) -> float32x2_t; } - _vcvt_n_f32_u32(a.as_signed(), N) + _vcvt_n_f32_u32(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] @@ -6804,9 +6802,9 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32" )] - fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; + fn _vcvtq_n_f32_u32(a: uint32x4_t, n: i32) -> float32x4_t; } - _vcvtq_n_f32_u32(a.as_signed(), N) + _vcvtq_n_f32_u32(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] @@ -6825,9 +6823,9 @@ pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32" )] - fn _vcvt_n_f32_u32(a: int32x2_t, n: i32) -> float32x2_t; + fn _vcvt_n_f32_u32(a: uint32x2_t, n: i32) -> float32x2_t; } - _vcvt_n_f32_u32(a.as_signed(), N) + _vcvt_n_f32_u32(a, N) } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] @@ -6846,9 +6844,9 @@ pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32" )] - fn _vcvtq_n_f32_u32(a: int32x4_t, n: i32) -> float32x4_t; + fn _vcvtq_n_f32_u32(a: uint32x4_t, n: i32) -> float32x4_t; } - _vcvtq_n_f32_u32(a.as_signed(), N) + _vcvtq_n_f32_u32(a, N) } #[doc = "Floating-point 
convert to signed fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s16_f16)"] @@ -7017,9 +7015,9 @@ pub unsafe fn vcvt_n_u16_f16(a: float16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i16.v4f16" )] - fn _vcvt_n_u16_f16(a: float16x4_t, n: i32) -> int16x4_t; + fn _vcvt_n_u16_f16(a: float16x4_t, n: i32) -> uint16x4_t; } - _vcvt_n_u16_f16(a, N).as_unsigned() + _vcvt_n_u16_f16(a, N) } #[doc = "Fixed-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u16_f16)"] @@ -7046,9 +7044,9 @@ pub unsafe fn vcvtq_n_u16_f16(a: float16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v8i16.v8f16" )] - fn _vcvtq_n_u16_f16(a: float16x8_t, n: i32) -> int16x8_t; + fn _vcvtq_n_u16_f16(a: float16x8_t, n: i32) -> uint16x8_t; } - _vcvtq_n_u16_f16(a, N).as_unsigned() + _vcvtq_n_u16_f16(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] @@ -7067,9 +7065,9 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32" )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> uint32x2_t; } - _vcvt_n_u32_f32(a, N).as_unsigned() + _vcvt_n_u32_f32(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] @@ -7088,9 +7086,9 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32" )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> uint32x4_t; } - _vcvtq_n_u32_f32(a, N).as_unsigned() + _vcvtq_n_u32_f32(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] @@ -7109,9 +7107,9 @@ pub unsafe fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32" )] - fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> int32x2_t; + fn _vcvt_n_u32_f32(a: float32x2_t, n: i32) -> uint32x2_t; } - _vcvt_n_u32_f32(a, N).as_unsigned() + _vcvt_n_u32_f32(a, N) } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] @@ -7130,9 +7128,9 @@ pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32" )] - fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> int32x4_t; + fn _vcvtq_n_u32_f32(a: float32x4_t, n: i32) -> uint32x4_t; } - _vcvtq_n_u32_f32(a, N).as_unsigned() + _vcvtq_n_u32_f32(a, N) } #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s16_f16)"] @@ -7287,9 +7285,9 @@ pub unsafe fn 
vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v2i32.v2f32" )] - fn _vcvt_u32_f32(a: float32x2_t) -> int32x2_t; + fn _vcvt_u32_f32(a: float32x2_t) -> uint32x2_t; } - _vcvt_u32_f32(a).as_unsigned() + _vcvt_u32_f32(a) } #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] @@ -7318,9 +7316,9 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.fptoui.sat.v4i32.v4f32" )] - fn _vcvtq_u32_f32(a: float32x4_t) -> int32x4_t; + fn _vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t; } - _vcvtq_u32_f32(a).as_unsigned() + _vcvtq_u32_f32(a) } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] @@ -7531,9 +7529,9 @@ pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" )] - fn _vdot_u32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + fn _vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t; } - _vdot_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vdot_u32(a, b, c) } #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] @@ -7562,9 +7560,9 @@ pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4 any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" )] - fn _vdotq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + fn _vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; } - _vdotq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vdotq_u32(a, b, c) } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f16)"] @@ -10685,9 +10683,9 @@ pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { link_name = "llvm.aarch64.neon.uhadd.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] - fn _vhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vhadd_u8(a, b) } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] @@ -10716,9 +10714,9 @@ pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.neon.uhadd.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] - fn _vhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vhaddq_u8(a, b) } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] @@ -10747,9 +10745,9 @@ pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { link_name = "llvm.aarch64.neon.uhadd.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] - fn _vhadd_u16(a: int16x4_t, b: 
int16x4_t) -> int16x4_t; + fn _vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vhadd_u16(a, b) } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] @@ -10778,9 +10776,9 @@ pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { link_name = "llvm.aarch64.neon.uhadd.v8i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] - fn _vhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vhaddq_u16(a, b) } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] @@ -10809,9 +10807,9 @@ pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.uhadd.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] - fn _vhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vhadd_u32(a, b) } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] @@ -10840,9 +10838,9 @@ pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { link_name = "llvm.aarch64.neon.uhadd.v4i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] - fn _vhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vhaddq_u32(a, b) } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] @@ -11057,9 +11055,9 @@ pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { link_name = "llvm.aarch64.neon.uhsub.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] - fn _vhsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vhsub_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vhsub_u8(a, b) } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] @@ -11088,9 +11086,9 @@ pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.neon.uhsub.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] - fn _vhsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vhsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vhsubq_u8(a, b) } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] @@ -11119,9 +11117,9 @@ pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { link_name = "llvm.aarch64.neon.uhsub.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] - fn _vhsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vhsub_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vhsub_u16(a, b) } #[doc = "Signed halving subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] @@ -11150,9 +11148,9 @@ pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { link_name = "llvm.aarch64.neon.uhsub.v8i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] - fn _vhsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vhsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vhsubq_u16(a, b) } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] @@ -11181,9 +11179,9 @@ pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.uhsub.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] - fn _vhsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vhsub_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vhsub_u32(a, b) } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] @@ -11212,9 +11210,9 @@ pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { link_name = "llvm.aarch64.neon.uhsub.v4i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] - fn _vhsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vhsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vhsubq_u32(a, b) } #[doc = "Load one single-element structure and replicate to all lanes of one register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f16)"] @@ -23501,9 +23499,9 @@ pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umax.v8i8" )] - fn _vmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vmax_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vmax_u8(a, b) } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] @@ -23532,9 +23530,9 @@ pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umax.v16i8" )] - fn _vmaxq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vmaxq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vmaxq_u8(a, b) } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] @@ -23563,9 +23561,9 @@ pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umax.v4i16" )] - fn _vmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vmax_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vmax_u16(a, b) } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] @@ -23594,9 +23592,9 @@ pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch 
= "arm64ec"), link_name = "llvm.aarch64.neon.umax.v8i16" )] - fn _vmaxq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vmaxq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vmaxq_u16(a, b) } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] @@ -23625,9 +23623,9 @@ pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umax.v2i32" )] - fn _vmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vmax_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vmax_u32(a, b) } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] @@ -23656,9 +23654,9 @@ pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umax.v4i32" )] - fn _vmaxq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vmaxq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vmaxq_u32(a, b) } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f16)"] @@ -24093,9 +24091,9 @@ pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umin.v8i8" )] - fn _vmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vmin_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vmin_u8(a, b) } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] @@ -24124,9 +24122,9 @@ pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umin.v16i8" )] - fn _vminq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vminq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vminq_u8(a, b) } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] @@ -24155,9 +24153,9 @@ pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umin.v4i16" )] - fn _vmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vmin_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vmin_u16(a, b) } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] @@ -24186,9 +24184,9 @@ pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umin.v8i16" )] - fn _vminq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vminq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vminq_u16(a, b) } #[doc = "Minimum (vector)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] @@ -24217,9 +24215,9 @@ pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umin.v2i32" )] - fn _vmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vmin_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vmin_u32(a, b) } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] @@ -24248,9 +24246,9 @@ pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.umin.v4i32" )] - fn _vminq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vminq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vminq_u32(a, b) } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f16)"] @@ -27856,9 +27854,9 @@ pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] - fn _vmmlaq_u32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + fn _vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; } - _vmmlaq_u32(a.as_signed(), b.as_signed(), c.as_signed()).as_unsigned() + _vmmlaq_u32(a, b, c) } #[doc = "Duplicate element to vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f16)"] @@ -29674,9 +29672,9 @@ pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { link_name = "llvm.aarch64.neon.umull.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")] - fn _vmull_u8(a: int8x8_t, b: int8x8_t) -> int16x8_t; + fn _vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t; } - _vmull_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vmull_u8(a, b) } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] @@ -29705,9 +29703,9 @@ pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { link_name = "llvm.aarch64.neon.umull.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")] - fn _vmull_u16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + fn _vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t; } - _vmull_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vmull_u16(a, b) } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] @@ -29736,9 +29734,9 @@ pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { link_name = "llvm.aarch64.neon.umull.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")] - fn _vmull_u32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + fn _vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t; } - _vmull_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vmull_u32(a, b) } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f16)"] @@ -31222,9 +31220,9 @@ pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { 
link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] - fn _vpaddl_u8(a: int8x8_t) -> int16x4_t; + fn _vpaddl_u8(a: uint8x8_t) -> uint16x4_t; } - _vpaddl_u8(a.as_signed()).as_unsigned() + _vpaddl_u8(a) } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] @@ -31253,9 +31251,9 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] - fn _vpaddlq_u8(a: int8x16_t) -> int16x8_t; + fn _vpaddlq_u8(a: uint8x16_t) -> uint16x8_t; } - _vpaddlq_u8(a.as_signed()).as_unsigned() + _vpaddlq_u8(a) } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] @@ -31284,9 +31282,9 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] - fn _vpaddl_u16(a: int16x4_t) -> int32x2_t; + fn _vpaddl_u16(a: uint16x4_t) -> uint32x2_t; } - _vpaddl_u16(a.as_signed()).as_unsigned() + _vpaddl_u16(a) } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] @@ -31315,9 +31313,9 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] - fn _vpaddlq_u16(a: int16x8_t) -> int32x4_t; + fn _vpaddlq_u16(a: uint16x8_t) -> uint32x4_t; } - _vpaddlq_u16(a.as_signed()).as_unsigned() + _vpaddlq_u16(a) } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] @@ -31346,9 +31344,9 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")] - fn _vpaddl_u32(a: int32x2_t) -> int64x1_t; + fn _vpaddl_u32(a: uint32x2_t) -> uint64x1_t; } - _vpaddl_u32(a.as_signed()).as_unsigned() + _vpaddl_u32(a) } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] @@ -31377,9 +31375,9 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] - fn _vpaddlq_u32(a: int32x4_t) -> int64x2_t; + fn _vpaddlq_u32(a: uint32x4_t) -> uint64x2_t; } - _vpaddlq_u32(a.as_signed()).as_unsigned() + _vpaddlq_u32(a) } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] @@ -31532,9 +31530,9 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { link_name = "llvm.aarch64.neon.umaxp.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] - fn _vpmax_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vpmax_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vpmax_u8(a, b) } #[doc = 
"Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] @@ -31563,9 +31561,9 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { link_name = "llvm.aarch64.neon.umaxp.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] - fn _vpmax_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vpmax_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vpmax_u16(a, b) } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] @@ -31594,9 +31592,9 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.umaxp.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] - fn _vpmax_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vpmax_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vpmax_u32(a, b) } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] @@ -31749,9 +31747,9 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { link_name = "llvm.aarch64.neon.uminp.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")] - fn _vpmin_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vpmin_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vpmin_u8(a, b) } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] @@ -31780,9 +31778,9 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { link_name = "llvm.aarch64.neon.uminp.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")] - fn _vpmin_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vpmin_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vpmin_u16(a, b) } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] @@ -31811,9 +31809,9 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.uminp.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")] - fn _vpmin_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vpmin_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vpmin_u32(a, b) } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] @@ -32276,9 +32274,9 @@ pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { link_name = "llvm.aarch64.neon.uqadd.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")] - fn _vqadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vqadd_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vqadd_u8(a, b) } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] @@ -32307,9 +32305,9 @@ pub 
unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.neon.uqadd.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")] - fn _vqaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vqaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vqaddq_u8(a, b) } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] @@ -32338,9 +32336,9 @@ pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { link_name = "llvm.aarch64.neon.uqadd.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")] - fn _vqadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vqadd_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vqadd_u16(a, b) } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] @@ -32369,9 +32367,9 @@ pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { link_name = "llvm.aarch64.neon.uqadd.v8i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")] - fn _vqaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vqaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vqaddq_u16(a, b) } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] @@ -32400,9 +32398,9 @@ pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.uqadd.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")] - fn _vqadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vqadd_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vqadd_u32(a, b) } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] @@ -32431,9 +32429,9 @@ pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { link_name = "llvm.aarch64.neon.uqadd.v4i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")] - fn _vqaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vqaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vqaddq_u32(a, b) } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] @@ -32462,9 +32460,9 @@ pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { link_name = "llvm.aarch64.neon.uqadd.v1i64" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v1i64")] - fn _vqadd_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; } - _vqadd_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vqadd_u64(a, b) } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] @@ -32493,9 +32491,9 @@ pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { link_name = "llvm.aarch64.neon.uqadd.v2i64" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")] - fn _vqaddq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vqaddq_u64(a: 
uint64x2_t, b: uint64x2_t) -> uint64x2_t; } - _vqaddq_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vqaddq_u64(a, b) } #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] @@ -33397,9 +33395,9 @@ pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqxtn.v8i8" )] - fn _vqmovn_u16(a: int16x8_t) -> int8x8_t; + fn _vqmovn_u16(a: uint16x8_t) -> uint8x8_t; } - _vqmovn_u16(a.as_signed()).as_unsigned() + _vqmovn_u16(a) } #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] @@ -33428,9 +33426,9 @@ pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqxtn.v4i16" )] - fn _vqmovn_u32(a: int32x4_t) -> int16x4_t; + fn _vqmovn_u32(a: uint32x4_t) -> uint16x4_t; } - _vqmovn_u32(a.as_signed()).as_unsigned() + _vqmovn_u32(a) } #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] @@ -33459,9 +33457,9 @@ pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqxtn.v2i32" )] - fn _vqmovn_u64(a: int64x2_t) -> int32x2_t; + fn _vqmovn_u64(a: uint64x2_t) -> uint32x2_t; } - _vqmovn_u64(a.as_signed()).as_unsigned() + _vqmovn_u64(a) } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] @@ -33490,9 +33488,9 @@ pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqxtun.v8i8" )] - fn _vqmovun_s16(a: int16x8_t) -> int8x8_t; + fn _vqmovun_s16(a: int16x8_t) -> uint8x8_t; } - _vqmovun_s16(a).as_unsigned() + _vqmovun_s16(a) } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] @@ -33521,9 +33519,9 @@ pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqxtun.v4i16" )] - fn _vqmovun_s32(a: int32x4_t) -> int16x4_t; + fn _vqmovun_s32(a: int32x4_t) -> uint16x4_t; } - _vqmovun_s32(a).as_unsigned() + _vqmovun_s32(a) } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] @@ -33552,9 +33550,9 @@ pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqxtun.v2i32" )] - fn _vqmovun_s64(a: int64x2_t) -> int32x2_t; + fn _vqmovun_s64(a: int64x2_t) -> uint32x2_t; } - _vqmovun_s64(a).as_unsigned() + _vqmovun_s64(a) } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] @@ -34467,9 +34465,9 @@ pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.v8i8" )] - fn _vqrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vqrshl_u8(a: 
uint8x8_t, b: int8x8_t) -> uint8x8_t; } - _vqrshl_u8(a.as_signed(), b).as_unsigned() + _vqrshl_u8(a, b) } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] @@ -34498,9 +34496,9 @@ pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.v16i8" )] - fn _vqrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; } - _vqrshlq_u8(a.as_signed(), b).as_unsigned() + _vqrshlq_u8(a, b) } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] @@ -34529,9 +34527,9 @@ pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.v4i16" )] - fn _vqrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; } - _vqrshl_u16(a.as_signed(), b).as_unsigned() + _vqrshl_u16(a, b) } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] @@ -34560,9 +34558,9 @@ pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.v8i16" )] - fn _vqrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; } - _vqrshlq_u16(a.as_signed(), b).as_unsigned() + _vqrshlq_u16(a, b) } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] @@ -34591,9 +34589,9 @@ pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.v2i32" )] - fn _vqrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; } - _vqrshl_u32(a.as_signed(), b).as_unsigned() + _vqrshl_u32(a, b) } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] @@ -34622,9 +34620,9 @@ pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.v4i32" )] - fn _vqrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; } - _vqrshlq_u32(a.as_signed(), b).as_unsigned() + _vqrshlq_u32(a, b) } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] @@ -34653,9 +34651,9 @@ pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.v1i64" )] - fn _vqrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; } - _vqrshl_u64(a.as_signed(), b).as_unsigned() + _vqrshl_u64(a, b) } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] @@ -34684,9 +34682,9 @@ pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshl.v2i64" )] - fn _vqrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; } - _vqrshlq_u64(a.as_signed(), b).as_unsigned() + _vqrshlq_u64(a, b) } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] @@ -34830,19 +34828,17 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] - fn _vqrshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + fn _vqrshrn_n_u16(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t; } _vqrshrn_n_u16( - a.as_signed(), + a, const { uint16x8_t([ -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, ]) - } - .as_signed(), + }, ) - .as_unsigned() } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] @@ -34858,13 +34854,12 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] - fn _vqrshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + fn _vqrshrn_n_u32(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t; } _vqrshrn_n_u32( - a.as_signed(), - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), + a, + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }, ) - .as_unsigned() } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] @@ -34880,13 +34875,9 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] - fn _vqrshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + fn _vqrshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; } - _vqrshrn_n_u64( - a.as_signed(), - const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), - ) - .as_unsigned() + _vqrshrn_n_u64(a, const { uint64x2_t([-N as u64, -N as u64]) }) } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] @@ -34905,9 +34896,9 @@ pub unsafe fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v8i8" )] - fn _vqrshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vqrshrn_n_u16(a: uint16x8_t, n: i32) -> uint8x8_t; } - _vqrshrn_n_u16(a.as_signed(), N).as_unsigned() + _vqrshrn_n_u16(a, N) } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] @@ -34926,9 +34917,9 @@ pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), 
link_name = "llvm.aarch64.neon.uqrshrn.v4i16" )] - fn _vqrshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vqrshrn_n_u32(a: uint32x4_t, n: i32) -> uint16x4_t; } - _vqrshrn_n_u32(a.as_signed(), N).as_unsigned() + _vqrshrn_n_u32(a, N) } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] @@ -34947,9 +34938,9 @@ pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqrshrn.v2i32" )] - fn _vqrshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vqrshrn_n_u64(a: uint64x2_t, n: i32) -> uint32x2_t; } - _vqrshrn_n_u64(a.as_signed(), N).as_unsigned() + _vqrshrn_n_u64(a, N) } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] @@ -34965,7 +34956,7 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] - fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; } _vqrshrun_n_s16( a, @@ -34976,7 +34967,6 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { ]) }, ) - .as_unsigned() } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] @@ -34992,13 +34982,12 @@ pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] - fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } _vqrshrun_n_s32( a, const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, ) - .as_unsigned() } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] @@ -35014,9 +35003,9 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] - fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } - _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() + _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] @@ -35035,9 +35024,9 @@ pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v8i8" )] - fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> uint8x8_t; } - _vqrshrun_n_s16(a, N).as_unsigned() + _vqrshrun_n_s16(a, N) } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] @@ -35056,9 +35045,9 @@ pub 
unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v4i16" )] - fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> uint16x4_t; } - _vqrshrun_n_s32(a, N).as_unsigned() + _vqrshrun_n_s32(a, N) } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] @@ -35077,9 +35066,9 @@ pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqrshrun.v2i32" )] - fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> uint32x2_t; } - _vqrshrun_n_s64(a, N).as_unsigned() + _vqrshrun_n_s64(a, N) } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] @@ -35756,9 +35745,9 @@ pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.v8i8" )] - fn _vqshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; } - _vqshl_u8(a.as_signed(), b).as_unsigned() + _vqshl_u8(a, b) } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] @@ -35787,9 +35776,9 @@ pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.v16i8" )] - fn _vqshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; } - _vqshlq_u8(a.as_signed(), b).as_unsigned() + _vqshlq_u8(a, b) } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] @@ -35818,9 +35807,9 @@ pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.v4i16" )] - fn _vqshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; } - _vqshl_u16(a.as_signed(), b).as_unsigned() + _vqshl_u16(a, b) } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] @@ -35849,9 +35838,9 @@ pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.v8i16" )] - fn _vqshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; } - _vqshlq_u16(a.as_signed(), b).as_unsigned() + _vqshlq_u16(a, b) } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] @@ -35880,9 +35869,9 @@ pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.v2i32" )] - fn _vqshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; } - _vqshl_u32(a.as_signed(), b).as_unsigned() + _vqshl_u32(a, b) 
} #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] @@ -35911,9 +35900,9 @@ pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.v4i32" )] - fn _vqshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; } - _vqshlq_u32(a.as_signed(), b).as_unsigned() + _vqshlq_u32(a, b) } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"] @@ -35942,9 +35931,9 @@ pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.v1i64" )] - fn _vqshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; } - _vqshl_u64(a.as_signed(), b).as_unsigned() + _vqshl_u64(a, b) } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] @@ -35973,9 +35962,9 @@ pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshl.v2i64" )] - fn _vqshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; } - _vqshlq_u64(a.as_signed(), b).as_unsigned() + _vqshlq_u64(a, b) } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] @@ -35991,7 +35980,7 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] - fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> uint8x8_t; } _vqshlu_n_s8( a, @@ -36001,7 +35990,6 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { ]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] @@ -36017,7 +36005,7 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] - fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> uint8x16_t; } _vqshluq_n_s8( a, @@ -36028,7 +36016,6 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { ]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] @@ -36044,13 +36031,12 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> uint16x4_t; } _vqshlu_n_s16( a, const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] @@ -36066,7 +36052,7 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> uint16x8_t; } _vqshluq_n_s16( a, @@ -36076,7 +36062,6 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { ]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] @@ -36092,9 +36077,9 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> uint32x2_t; } - _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() + _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }) } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] @@ -36110,13 +36095,12 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> uint32x4_t; } _vqshluq_n_s32( a, const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] @@ -36132,9 +36116,9 @@ pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] - fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; + fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; } - _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() + _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] @@ -36150,9 +36134,9 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; } - _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() + _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }) } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] @@ -36171,7 +36155,7 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i8" )] - fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> int8x8_t; + fn _vqshlu_n_s8(a: 
int8x8_t, n: int8x8_t) -> uint8x8_t; } _vqshlu_n_s8( a, @@ -36181,7 +36165,6 @@ pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { ]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] @@ -36200,7 +36183,7 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v16i8" )] - fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> int8x16_t; + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> uint8x16_t; } _vqshluq_n_s8( a, @@ -36211,7 +36194,6 @@ pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { ]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] @@ -36230,13 +36212,12 @@ pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i16" )] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> int16x4_t; + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> uint16x4_t; } _vqshlu_n_s16( a, const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] @@ -36255,7 +36236,7 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i16" )] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> int16x8_t; + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> uint16x8_t; } _vqshluq_n_s16( a, @@ -36265,7 +36246,6 @@ pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { ]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] @@ -36284,9 +36264,9 @@ pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i32" )] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> int32x2_t; + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> uint32x2_t; } - _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }).as_unsigned() + _vqshlu_n_s32(a, const { int32x2_t([N as i32, N as i32]) }) } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] @@ -36305,13 +36285,12 @@ pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i32" )] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> int32x4_t; + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> uint32x4_t; } _vqshluq_n_s32( a, const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] @@ -36330,9 +36309,9 @@ pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v1i64" )] - fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> int64x1_t; + fn 
_vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; } - _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }).as_unsigned() + _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] @@ -36351,9 +36330,9 @@ pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i64" )] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> int64x2_t; + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; } - _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }).as_unsigned() + _vqshluq_n_s64(a, const { int64x2_t([N as i64, N as i64]) }) } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] @@ -36497,19 +36476,17 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] - fn _vqshrn_n_u16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + fn _vqshrn_n_u16(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t; } _vqshrn_n_u16( - a.as_signed(), + a, const { uint16x8_t([ -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, ]) - } - .as_signed(), + }, ) - .as_unsigned() } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] @@ -36525,13 +36502,12 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] - fn _vqshrn_n_u32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + fn _vqshrn_n_u32(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t; } _vqshrn_n_u32( - a.as_signed(), - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed(), + a, + const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }, ) - .as_unsigned() } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] @@ -36547,13 +36523,9 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] - fn _vqshrn_n_u64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + fn _vqshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; } - _vqshrn_n_u64( - a.as_signed(), - const { uint64x2_t([-N as u64, -N as u64]) }.as_signed(), - ) - .as_unsigned() + _vqshrn_n_u64(a, const { uint64x2_t([-N as u64, -N as u64]) }) } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] @@ -36572,9 +36544,9 @@ pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v8i8" )] - fn _vqshrn_n_u16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vqshrn_n_u16(a: uint16x8_t, n: i32) -> uint8x8_t; } - _vqshrn_n_u16(a.as_signed(), N).as_unsigned() + _vqshrn_n_u16(a, N) } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] @@ -36593,9 +36565,9 @@ pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v4i16" )] - fn _vqshrn_n_u32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vqshrn_n_u32(a: uint32x4_t, n: i32) -> uint16x4_t; } - _vqshrn_n_u32(a.as_signed(), N).as_unsigned() + _vqshrn_n_u32(a, N) } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] @@ -36614,9 +36586,9 @@ pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.uqshrn.v2i32" )] - fn _vqshrn_n_u64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vqshrn_n_u64(a: uint64x2_t, n: i32) -> uint32x2_t; } - _vqshrn_n_u64(a.as_signed(), N).as_unsigned() + _vqshrn_n_u64(a, N) } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] @@ -36632,7 +36604,7 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] - fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; } _vqshrun_n_s16( a, @@ -36643,7 +36615,6 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { ]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] @@ -36659,13 +36630,12 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] - fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } _vqshrun_n_s32( a, const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }, ) - .as_unsigned() } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] @@ -36681,9 +36651,9 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] - fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } - _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }).as_unsigned() + _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] @@ -36702,9 +36672,9 @@ pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v8i8" )] - fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> uint8x8_t; } - _vqshrun_n_s16(a, N).as_unsigned() + _vqshrun_n_s16(a, N) } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] @@ -36723,9 +36693,9 @@ pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v4i16" )] - fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> uint16x4_t; } - _vqshrun_n_s32(a, N).as_unsigned() + _vqshrun_n_s32(a, N) } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] @@ -36744,9 +36714,9 @@ pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshrun.v2i32" )] - fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> uint32x2_t; } - _vqshrun_n_s64(a, N).as_unsigned() + _vqshrun_n_s64(a, N) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] @@ -37023,9 +36993,9 @@ pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { link_name = "llvm.aarch64.neon.uqsub.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")] - fn _vqsub_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vqsub_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vqsub_u8(a, b) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] @@ -37054,9 +37024,9 @@ pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.neon.uqsub.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")] - fn _vqsubq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vqsubq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vqsubq_u8(a, b) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] @@ -37085,9 +37055,9 @@ pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { link_name = "llvm.aarch64.neon.uqsub.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")] - fn _vqsub_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vqsub_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vqsub_u16(a, b) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] @@ -37116,9 +37086,9 @@ pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { link_name = "llvm.aarch64.neon.uqsub.v8i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")] - fn _vqsubq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vqsubq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vqsubq_u16(a, b) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] @@ -37147,9 +37117,9 @@ pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.uqsub.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")] - 
fn _vqsub_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vqsub_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vqsub_u32(a, b) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] @@ -37178,9 +37148,9 @@ pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { link_name = "llvm.aarch64.neon.uqsub.v4i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")] - fn _vqsubq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vqsubq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vqsubq_u32(a, b) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] @@ -37209,9 +37179,9 @@ pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { link_name = "llvm.aarch64.neon.uqsub.v1i64" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")] - fn _vqsub_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; } - _vqsub_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vqsub_u64(a, b) } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] @@ -37240,9 +37210,9 @@ pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { link_name = "llvm.aarch64.neon.uqsub.v2i64" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")] - fn _vqsubq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; } - _vqsubq_u64(a.as_signed(), b.as_signed()).as_unsigned() + _vqsubq_u64(a, b) } #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] @@ -37771,9 +37741,9 @@ pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urecpe.v2i32" )] - fn _vrecpe_u32(a: int32x2_t) -> int32x2_t; + fn _vrecpe_u32(a: uint32x2_t) -> uint32x2_t; } - _vrecpe_u32(a.as_signed()).as_unsigned() + _vrecpe_u32(a) } #[doc = "Unsigned reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] @@ -37802,9 +37772,9 @@ pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urecpe.v4i32" )] - fn _vrecpeq_u32(a: int32x4_t) -> int32x4_t; + fn _vrecpeq_u32(a: uint32x4_t) -> uint32x4_t; } - _vrecpeq_u32(a.as_signed()).as_unsigned() + _vrecpeq_u32(a) } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f16)"] @@ -53779,9 +53749,9 @@ pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { link_name = "llvm.aarch64.neon.urhadd.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] - fn _vrhadd_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; } - _vrhadd_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vrhadd_u8(a, b) } #[doc = "Rounding halving add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] @@ -53810,9 +53780,9 @@ pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { link_name = "llvm.aarch64.neon.urhadd.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] - fn _vrhaddq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - _vrhaddq_u8(a.as_signed(), b.as_signed()).as_unsigned() + _vrhaddq_u8(a, b) } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] @@ -53841,9 +53811,9 @@ pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { link_name = "llvm.aarch64.neon.urhadd.v4i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] - fn _vrhadd_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - _vrhadd_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vrhadd_u16(a, b) } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] @@ -53872,9 +53842,9 @@ pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { link_name = "llvm.aarch64.neon.urhadd.v8i16" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] - fn _vrhaddq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } - _vrhaddq_u16(a.as_signed(), b.as_signed()).as_unsigned() + _vrhaddq_u16(a, b) } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] @@ -53903,9 +53873,9 @@ pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { link_name = "llvm.aarch64.neon.urhadd.v2i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] - fn _vrhadd_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - _vrhadd_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vrhadd_u32(a, b) } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] @@ -53934,9 +53904,9 @@ pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { link_name = "llvm.aarch64.neon.urhadd.v4i32" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] - fn _vrhaddq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } - _vrhaddq_u32(a.as_signed(), b.as_signed()).as_unsigned() + _vrhaddq_u32(a, b) } #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f16)"] @@ -54323,9 +54293,9 @@ pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.v8i8" )] - fn _vrshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; } - _vrshl_u8(a.as_signed(), b).as_unsigned() + _vrshl_u8(a, b) } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] @@ -54354,9 +54324,9 @@ pub unsafe fn vrshlq_u8(a: 
uint8x16_t, b: int8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.v16i8" )] - fn _vrshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; } - _vrshlq_u8(a.as_signed(), b).as_unsigned() + _vrshlq_u8(a, b) } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] @@ -54385,9 +54355,9 @@ pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.v4i16" )] - fn _vrshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; } - _vrshl_u16(a.as_signed(), b).as_unsigned() + _vrshl_u16(a, b) } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] @@ -54416,9 +54386,9 @@ pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.v8i16" )] - fn _vrshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; } - _vrshlq_u16(a.as_signed(), b).as_unsigned() + _vrshlq_u16(a, b) } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] @@ -54447,9 +54417,9 @@ pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.v2i32" )] - fn _vrshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; } - _vrshl_u32(a.as_signed(), b).as_unsigned() + _vrshl_u32(a, b) } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] @@ -54478,9 +54448,9 @@ pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.v4i32" )] - fn _vrshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; } - _vrshlq_u32(a.as_signed(), b).as_unsigned() + _vrshlq_u32(a, b) } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] @@ -54509,9 +54479,9 @@ pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.v1i64" )] - fn _vrshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; } - _vrshl_u64(a.as_signed(), b).as_unsigned() + _vrshl_u64(a, b) } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] @@ -54540,9 +54510,9 @@ pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.urshl.v2i64" )] - fn _vrshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; } - _vrshlq_u64(a.as_signed(), b).as_unsigned() + _vrshlq_u64(a, b) } #[doc = 
"Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] @@ -55284,9 +55254,9 @@ pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ursqrte.v2i32" )] - fn _vrsqrte_u32(a: int32x2_t) -> int32x2_t; + fn _vrsqrte_u32(a: uint32x2_t) -> uint32x2_t; } - _vrsqrte_u32(a.as_signed()).as_unsigned() + _vrsqrte_u32(a) } #[doc = "Unsigned reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] @@ -55315,9 +55285,9 @@ pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ursqrte.v4i32" )] - fn _vrsqrteq_u32(a: int32x4_t) -> int32x4_t; + fn _vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t; } - _vrsqrteq_u32(a.as_signed()).as_unsigned() + _vrsqrteq_u32(a) } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f16)"] @@ -56734,9 +56704,9 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> link_name = "llvm.aarch64.crypto.sha1c" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] - fn _vsha1cq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + fn _vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; } - _vsha1cq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() + _vsha1cq_u32(hash_abcd, hash_e, wk) } #[doc = "SHA1 fixed rotate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)"] @@ -56761,9 +56731,9 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { link_name = "llvm.aarch64.crypto.sha1h" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")] - fn _vsha1h_u32(hash_e: i32) -> i32; + fn _vsha1h_u32(hash_e: u32) -> u32; } - _vsha1h_u32(hash_e.as_signed()).as_unsigned() + _vsha1h_u32(hash_e) } #[doc = "SHA1 hash update accelerator, majority"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] @@ -56788,9 +56758,9 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> link_name = "llvm.aarch64.crypto.sha1m" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] - fn _vsha1mq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + fn _vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; } - _vsha1mq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() + _vsha1mq_u32(hash_abcd, hash_e, wk) } #[doc = "SHA1 hash update accelerator, parity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] @@ -56815,9 +56785,9 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> link_name = "llvm.aarch64.crypto.sha1p" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] - fn _vsha1pq_u32(hash_abcd: int32x4_t, hash_e: i32, wk: int32x4_t) -> int32x4_t; + fn _vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; } - _vsha1pq_u32(hash_abcd.as_signed(), hash_e.as_signed(), wk.as_signed()).as_unsigned() + _vsha1pq_u32(hash_abcd, hash_e, wk) } #[doc = "SHA1 schedule update 
accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] @@ -56842,9 +56812,9 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_ link_name = "llvm.aarch64.crypto.sha1su0" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] - fn _vsha1su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t, w8_11: int32x4_t) -> int32x4_t; + fn _vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t; } - _vsha1su0q_u32(w0_3.as_signed(), w4_7.as_signed(), w8_11.as_signed()).as_unsigned() + _vsha1su0q_u32(w0_3, w4_7, w8_11) } #[doc = "SHA1 schedule update accelerator, second part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] @@ -56869,9 +56839,9 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t link_name = "llvm.aarch64.crypto.sha1su1" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] - fn _vsha1su1q_u32(tw0_3: int32x4_t, w12_15: int32x4_t) -> int32x4_t; + fn _vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t; } - _vsha1su1q_u32(tw0_3.as_signed(), w12_15.as_signed()).as_unsigned() + _vsha1su1q_u32(tw0_3, w12_15) } #[doc = "SHA1 schedule update accelerator, upper part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] @@ -56900,9 +56870,13 @@ pub unsafe fn vsha256h2q_u32( link_name = "llvm.aarch64.crypto.sha256h2" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] - fn _vsha256h2q_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; + fn _vsha256h2q_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, + ) -> uint32x4_t; } - _vsha256h2q_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() + _vsha256h2q_u32(hash_abcd, hash_efgh, wk) } #[doc = "SHA1 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] @@ -56931,9 +56905,13 @@ pub unsafe fn vsha256hq_u32( link_name = "llvm.aarch64.crypto.sha256h" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] - fn _vsha256hq_u32(hash_abcd: int32x4_t, hash_efgh: int32x4_t, wk: int32x4_t) -> int32x4_t; + fn _vsha256hq_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, + ) -> uint32x4_t; } - _vsha256hq_u32(hash_abcd.as_signed(), hash_efgh.as_signed(), wk.as_signed()).as_unsigned() + _vsha256hq_u32(hash_abcd, hash_efgh, wk) } #[doc = "SHA256 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] @@ -56958,9 +56936,9 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t link_name = "llvm.aarch64.crypto.sha256su0" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] - fn _vsha256su0q_u32(w0_3: int32x4_t, w4_7: int32x4_t) -> int32x4_t; + fn _vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t; } - _vsha256su0q_u32(w0_3.as_signed(), w4_7.as_signed()).as_unsigned() + _vsha256su0q_u32(w0_3, w4_7) } #[doc = "SHA256 schedule update accelerator, second part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] @@ -56989,9 +56967,10 @@ pub unsafe fn 
vsha256su1q_u32( link_name = "llvm.aarch64.crypto.sha256su1" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] - fn _vsha256su1q_u32(tw0_3: int32x4_t, w8_11: int32x4_t, w12_15: int32x4_t) -> int32x4_t; + fn _vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) + -> uint32x4_t; } - _vsha256su1q_u32(tw0_3.as_signed(), w8_11.as_signed(), w12_15.as_signed()).as_unsigned() + _vsha256su1q_u32(tw0_3, w8_11, w12_15) } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftins_v16i8)"] @@ -57780,9 +57759,9 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ushl.v8i8" )] - fn _vshl_u8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + fn _vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; } - _vshl_u8(a.as_signed(), b).as_unsigned() + _vshl_u8(a, b) } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] @@ -57811,9 +57790,9 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ushl.v16i8" )] - fn _vshlq_u8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + fn _vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; } - _vshlq_u8(a.as_signed(), b).as_unsigned() + _vshlq_u8(a, b) } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] @@ -57842,9 +57821,9 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ushl.v4i16" )] - fn _vshl_u16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + fn _vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; } - _vshl_u16(a.as_signed(), b).as_unsigned() + _vshl_u16(a, b) } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] @@ -57873,9 +57852,9 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ushl.v8i16" )] - fn _vshlq_u16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; } - _vshlq_u16(a.as_signed(), b).as_unsigned() + _vshlq_u16(a, b) } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] @@ -57904,9 +57883,9 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ushl.v2i32" )] - fn _vshl_u32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; } - _vshl_u32(a.as_signed(), b).as_unsigned() + _vshl_u32(a, b) } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] @@ -57935,9 +57914,9 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ushl.v4i32" )] - fn _vshlq_u32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; } - _vshlq_u32(a.as_signed(), 
b).as_unsigned() + _vshlq_u32(a, b) } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] @@ -57966,9 +57945,9 @@ pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ushl.v1i64" )] - fn _vshl_u64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + fn _vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; } - _vshl_u64(a.as_signed(), b).as_unsigned() + _vshl_u64(a, b) } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] @@ -57997,9 +57976,9 @@ pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.ushl.v2i64" )] - fn _vshlq_u64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; } - _vshlq_u64(a.as_signed(), b).as_unsigned() + _vshlq_u64(a, b) } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] @@ -70210,9 +70189,9 @@ pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] - fn _vusdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + fn _vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t; } - _vusdot_s32(a, b.as_signed(), c) + _vusdot_s32(a, b, c) } #[doc = "Dot product vector form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] @@ -70241,9 +70220,9 @@ pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_ link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] - fn _vusdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + fn _vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t; } - _vusdotq_s32(a, b.as_signed(), c) + _vusdotq_s32(a, b, c) } #[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] @@ -70272,9 +70251,9 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4 link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" )] #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] - fn _vusmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + fn _vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t; } - _vusmmlaq_s32(a, b.as_signed(), c) + _vusmmlaq_s32(a, b, c) } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f16)"] diff --git a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index f3924b0f94..c0e1b2e313 100644 --- a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -5,6 +5,9 @@ arch_cfgs: # Generate big endian shuffles auto_big_endian: true +# We do not want to automatically generate signed/unsigned casts +auto_llvm_sign_conversion: false + # Repeatedly used anchors # 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] neon-stable: &neon-stable @@ -1004,7 +1007,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.facgt.{type[3]}.{type[1]}" arch: aarch64,arm64ec - - '_vcagth_f16(a, b).as_unsigned() as u16' + - '_vcagth_f16(a, b) as u16' - name: "vcage{neon_type[0].no}" doc: "Floating-point absolute compare greater than or equal" @@ -1064,7 +1067,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.facge.{type[3]}.{type[1]}" arch: aarch64,arm64ec - - "_vcageh_f16(a, b).as_unsigned() as u16" + - "_vcageh_f16(a, b) as u16" - name: "vcalt{neon_type[0].no}" doc: "Floating-point absolute compare less than" @@ -1314,7 +1317,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.vcvtfxu2fp.{type[1]}.{type[0]}" arch: aarch64,arm64ec - - FnCall: ["_vcvt{type[2]}_n_{type[1]}_{type[0]}", [a.as_signed(), N]] + - FnCall: ["_vcvt{type[2]}_n_{type[1]}_{type[0]}", [a, N]] - name: "vcvt{type[2]}_n_{type[1]}_{type[0]}" @@ -1406,7 +1409,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.vcvtfxu2fp.{neon_type[1]}.{neon_type[0]}" arch: aarch64,arm64ec - - FnCall: ["_vcvt{neon_type[1].N}_{neon_type[0]}", ["a.as_signed()", N]] + - FnCall: ["_vcvt{neon_type[1].N}_{neon_type[0]}", ["a", N]] - name: "vcvt{type[2]}" doc: "Fixed-point convert to floating-point" @@ -1432,7 +1435,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.vcvtfxu2fp.{type[1]}.{type[0]}" arch: aarch64,arm64ec - - FnCall: ["_vcvt{type[2]}", ["a.as_signed()", N]] + - FnCall: ["_vcvt{type[2]}", ["a", N]] - name: "vcvt{type[2]}" doc: "Fixed-point convert to floating-point" @@ -6023,7 +6026,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uaddlv.{type[2]}.{neon_type[0]}" arch: aarch64,arm64ec - - FnCall: ['_vaddlv{neon_type[0].no}', ['a.as_signed()']] + - FnCall: ['_vaddlv{neon_type[0].no}', ['a']] - name: "vaddlv{neon_type[0].no}" doc: Unsigned Add Long across Vector @@ -6041,7 +6044,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uaddlv.{type[2]}.{neon_type[0]}" arch: aarch64,arm64ec - - FnCall: ['_vaddlv{neon_type[0].no}', ['a.as_signed()']] + - FnCall: ['_vaddlv{neon_type[0].no}', ['a']] - name: "vsubw_high{neon_type[1].noq}" doc: Signed Subtract Wide @@ -8704,7 +8707,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uqshrn.i32" arch: aarch64,arm64ec - - FnCall: ["_vqshrnd_n_u64", ["a.as_signed()", N]] + - FnCall: ["_vqshrnd_n_u64", ["a", N]] - name: "vqshrn{type[0]}" doc: "Unsigned saturating shift right narrow" @@ -9845,9 +9848,9 @@ intrinsics: arch: aarch64,arm64ec - FnCall: - "_vsm3tt{type[0]}" - - - "a.as_signed()" - - "b.as_signed()" - - "c.as_signed()" + - - "a" + - "b" + - "c" - "IMM2 as i64" - name: "vxarq_u64" @@ -9877,8 +9880,8 @@ intrinsics: arch: aarch64,arm64ec - FnCall: - "_vxarq_u64" - - - "a.as_signed()" - - "b.as_signed()" + - - "a" + - "b" - "IMM6 as i64" - name: "vrnd32x{neon_type.no}" @@ -13979,7 +13982,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uaddlv.i32.{neon_type[0]}" arch: aarch64,arm64ec - - Identifier: ["_vaddlv{neon_type[0].no}(a.as_signed()).as_unsigned() as u16", Symbol] + - Identifier: ["_vaddlv{neon_type[0].no}(a) as u16", Symbol] - name: "vmaxv{neon_type[0].no}" doc: "Horizontal vector max." 
diff --git a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 2668efdb24..b125ae9a96 100644 --- a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -1139,7 +1139,7 @@ intrinsics: links: - link: "llvm.arm.neon.vcvtfxu2fp.{neon_type[1]}.{neon_type[0]}" arch: arm - - FnCall: ["_vcvt{neon_type[1].N}_{neon_type[0]}", ["a.as_signed()", N]] + - FnCall: ["_vcvt{neon_type[1].N}_{neon_type[0]}", ["a", N]] - name: "vcvt{neon_type[1].N}_{neon_type[0]}" doc: "Fixed-point convert to floating-point" @@ -1166,7 +1166,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.vcvtfxu2fp.{neon_type[1]}.{neon_type[0]}" arch: aarch64,arm64ec - - FnCall: ["_vcvt{neon_type[1].N}_{neon_type[0]}", ["a.as_signed()", N]] + - FnCall: ["_vcvt{neon_type[1].N}_{neon_type[0]}", ["a", N]] - name: "vcvt{neon_type[1].N}_{neon_type[0]}" doc: "Fixed-point convert to floating-point" @@ -1197,7 +1197,7 @@ intrinsics: arch: arm - link: "llvm.aarch64.neon.vcvtfxu2fp.{neon_type[1]}.{neon_type[0]}" arch: aarch64,arm64ec - - FnCall: ["_vcvt{neon_type[1].N}_{neon_type[0]}", ["a.as_signed()", N]] + - FnCall: ["_vcvt{neon_type[1].N}_{neon_type[0]}", ["a", N]] - name: "vcvt{neon_type[1].N}_{neon_type[0]}" @@ -8486,9 +8486,9 @@ intrinsics: safety: unsafe: [neon] types: - - [uint16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) }.as_signed()'] - - [uint32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed()'] - - [uint64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { uint64x2_t([-N as u64, -N as u64]) }.as_signed()'] + - [uint16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) }'] + - [uint32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }'] + - [uint64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { uint64x2_t([-N as u64, -N as u64]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -8499,7 +8499,7 @@ intrinsics: links: - link: "llvm.arm.neon.vqshiftnu.{neon_type[1]}" arch: arm - - FnCall: ["_vqshrn_n_{neon_type[0]}", ["a.as_signed()", "{type[3]}"]] + - FnCall: ["_vqshrn_n_{neon_type[0]}", ["a", "{type[3]}"]] - name: "vqshrn_n_{neon_type[0]}" doc: "Unsigned saturating shift right narrow" @@ -8527,7 +8527,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uqshrn.{neon_type[1]}" arch: aarch64,arm64ec - - FnCall: ["_vqshrn_n_{neon_type[0]}", ["a.as_signed()", N]] + - FnCall: ["_vqshrn_n_{neon_type[0]}", ["a", N]] - name: "vqshrun_n_{neon_type[0]}" doc: "Signed saturating shift right unsigned narrow" @@ -10987,9 +10987,9 @@ intrinsics: safety: unsafe: [neon] types: - - [uint16x8_t, uint8x8_t, '8', 'const { uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) }.as_signed()'] - - [uint32x4_t, uint16x4_t, '16', 'const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }.as_signed()'] - - [uint64x2_t, uint32x2_t, '32', 'const { uint64x2_t([-N as u64, -N as u64]) }.as_signed()'] + - [uint16x8_t, uint8x8_t, '8', 'const { uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) }'] + - [uint32x4_t, uint16x4_t, '16', 'const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }'] + - 
[uint64x2_t, uint32x2_t, '32', 'const { uint64x2_t([-N as u64, -N as u64]) }'] compose: - FnCall: [static_assert!, ['N >= 1 && N <= {type[2]}']] - LLVMLink: @@ -11000,7 +11000,7 @@ intrinsics: links: - link: "llvm.arm.neon.vqrshiftnu.{neon_type[1]}" arch: arm - - FnCall: ["_vqrshrn_n{neon_type[0].noq}", ["a.as_signed()", "{type[3]}"]] + - FnCall: ["_vqrshrn_n{neon_type[0].noq}", ["a", "{type[3]}"]] - name: "vqrshrn_n_{neon_type[0]}" doc: "Unsigned signed saturating rounded shift right narrow" @@ -11028,7 +11028,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uqrshrn.{neon_type[1]}" arch: aarch64,arm64ec - - FnCall: ["_vqrshrn_n_{neon_type[0]}", ["a.as_signed()", N]] + - FnCall: ["_vqrshrn_n_{neon_type[0]}", ["a", N]] - name: "vcvt{neon_type[1].no}_{neon_type[0]}" doc: "Floating-point convert to unsigned fixed-point, rounding toward zero" @@ -13167,7 +13167,7 @@ intrinsics: arch: aarch64,arm64ec - link: "llvm.arm.crc32b" arch: arm - - FnCall: ["___crc32b", ["crc.as_signed()", "data.as_signed() as i32"]] + - FnCall: ["___crc32b", ["crc", "data as u32"]] - name: "__crc32h" doc: "CRC32 single round checksum for bytes (16 bits)." @@ -13194,7 +13194,7 @@ intrinsics: arch: aarch64,arm64ec - link: "llvm.arm.crc32h" arch: arm - - FnCall: ["___crc32h", ["crc.as_signed()", "data.as_signed() as i32"]] + - FnCall: ["___crc32h", ["crc", "data as u32"]] - name: "__crc32w" doc: "CRC32 single round checksum for bytes (32 bits)." @@ -13244,7 +13244,7 @@ intrinsics: arch: aarch64,arm64ec - link: "llvm.arm.crc32cb" arch: arm - - FnCall: ["___crc32cb", ["crc.as_signed()", "data.as_signed() as i32"]] + - FnCall: ["___crc32cb", ["crc", "data as u32"]] - name: "__crc32ch" doc: "CRC32-C single round checksum for bytes (16 bits)." @@ -13271,7 +13271,7 @@ intrinsics: arch: aarch64,arm64ec - link: "llvm.arm.crc32ch" arch: arm - - FnCall: ["___crc32ch", ["crc.as_signed()", "data.as_signed() as i32"]] + - FnCall: ["___crc32ch", ["crc", "data as u32"]] - name: "__crc32cw" doc: "CRC32-C single round checksum for bytes (32 bits)." @@ -13313,10 +13313,9 @@ intrinsics: # As the call to `__crc32` does not get inlined, we define an LLVM binding # here, which is the same as above, and call it directly which results # in the correct instructions being generated - - Let: [a, i32, 'crc as i32'] - - Let: [b, i32, '(data & 0xFFFFFFFF).as_signed() as i32'] - - Let: [c, i32, '(data >> 32).as_signed() as i32'] - - 'unsafe extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] fn ___crc32w(crc: i32, data: i32) -> i32;}} ___crc32w(___crc32w(a, b), c).as_unsigned()' + - Let: [b, u32, '(data & 0xFFFFFFFF) as u32'] + - Let: [c, u32, '(data >> 32) as u32'] + - 'unsafe extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32w")] fn ___crc32w(crc: u32, data: u32) -> u32;}} ___crc32w(___crc32w(crc, b), c)' - name: "__crc32cd" doc: "CRC32-C single round checksum for quad words (64 bits)." 
@@ -13332,10 +13331,9 @@ intrinsics: types: - [u32, u64] compose: - - Let: [a, i32, 'crc as i32'] - - Let: [b, i32, '(data & 0xFFFFFFFF).as_signed() as i32'] - - Let: [c, i32, '(data >> 32).as_signed() as i32'] - - 'unsafe extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] fn ___crc32cw(crc: i32, data: i32) -> i32;}} ___crc32cw(___crc32cw(a, b), c).as_unsigned() as u32' + - Let: [b, u32, '(data & 0xFFFFFFFF) as u32'] + - Let: [c, u32, '(data >> 32) as u32'] + - 'unsafe extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.crc32cw")] fn ___crc32cw(crc: u32, data: u32) -> u32;}} ___crc32cw(___crc32cw(crc, b), c)' - name: "vabs{neon_type.no}" doc: "Absolute value (wrapping)." diff --git a/crates/stdarch-gen-arm/src/context.rs b/crates/stdarch-gen-arm/src/context.rs index 44b5208f39..751fd9f2a3 100644 --- a/crates/stdarch-gen-arm/src/context.rs +++ b/crates/stdarch-gen-arm/src/context.rs @@ -39,6 +39,10 @@ pub struct GlobalContext { /// Should the yaml file automagically generate big endian shuffling #[serde(default)] pub auto_big_endian: Option<bool>, + + /// Should all LLVM wrappers convert their arguments to a signed type + #[serde(default)] + pub auto_llvm_sign_conversion: bool, } /// Context of an intrinsic group diff --git a/crates/stdarch-gen-arm/src/expression.rs b/crates/stdarch-gen-arm/src/expression.rs index b796bf675c..55957a6d6b 100644 --- a/crates/stdarch-gen-arm/src/expression.rs +++ b/crates/stdarch-gen-arm/src/expression.rs @@ -203,7 +203,7 @@ impl Expression { *self = intrinsic .llvm_link() .expect("got LLVMLink wildcard without a LLVM link in `compose`") - .apply_conversions_to_call(fn_call.clone(), ctx.local)? + .apply_conversions_to_call(fn_call.clone(), ctx)? } } diff --git a/crates/stdarch-gen-arm/src/intrinsic.rs b/crates/stdarch-gen-arm/src/intrinsic.rs index ad833f52ad..fc8f4343b5 100644 --- a/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/crates/stdarch-gen-arm/src/intrinsic.rs @@ -548,7 +548,9 @@ impl LLVMLink { Ok(()) } - /// Alters all the unsigned types from the signature, as unsupported by LLVM. + /// Alters all the unsigned types from the signature. This is required where + /// a signed and an unsigned variant require the same binding to an exposed + /// LLVM intrinsic. 
pub fn sanitise_uints(&mut self) { let transform = |tk: &mut TypeKind| { if let Some(BaseType::Sized(BaseTypeKind::UInt, size)) = tk.base_type() { @@ -603,7 +605,7 @@ impl LLVMLink { pub fn apply_conversions_to_call( &self, mut fn_call: FnCall, - ctx: &LocalContext, + ctx: &Context, ) -> context::Result { use BaseType::{Sized, Unsized}; use BaseTypeKind::{Bool, UInt}; @@ -618,6 +620,7 @@ impl LLVMLink { .map(|arg| -> context::Result { if let Expression::Identifier(ref var_name, IdentifierType::Variable) = arg { let (kind, scope) = ctx + .local .variables .get(&var_name.to_string()) .ok_or_else(|| format!("invalid variable {var_name:?} being referenced"))?; @@ -627,7 +630,11 @@ impl LLVMLink { Ok(convert("into", arg)) } (Argument, Some(Sized(UInt, _) | Unsized(UInt))) => { - Ok(convert("as_signed", arg)) + if ctx.global.auto_llvm_sign_conversion { + Ok(convert("as_signed", arg)) + } else { + Ok(arg) + } } _ => Ok(arg), } @@ -637,22 +644,25 @@ impl LLVMLink { }) .try_collect()?; - let return_type_requires_conversion = self - .signature - .as_ref() - .and_then(|sig| sig.return_type.as_ref()) - .and_then(|ty| { - if let Some(Sized(Bool, bitsize)) = ty.base_type() { - (*bitsize != 8).then_some(Bool) - } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() { - Some(UInt) - } else { - None - } - }); + let return_type_conversion = if !ctx.global.auto_llvm_sign_conversion { + None + } else { + self.signature + .as_ref() + .and_then(|sig| sig.return_type.as_ref()) + .and_then(|ty| { + if let Some(Sized(Bool, bitsize)) = ty.base_type() { + (*bitsize != 8).then_some(Bool) + } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() { + Some(UInt) + } else { + None + } + }) + }; let fn_call = Expression::FnCall(fn_call); - match return_type_requires_conversion { + match return_type_conversion { Some(Bool) => Ok(convert("into", fn_call)), Some(UInt) => Ok(convert("as_unsigned", fn_call)), _ => Ok(fn_call), @@ -1509,8 +1519,10 @@ impl Intrinsic { } if let Some(llvm_link) = self.llvm_link_mut() { - // Turn all Rust unsigned types into signed - llvm_link.sanitise_uints(); + /* Turn all Rust unsigned types into signed if required */ + if ctx.global.auto_llvm_sign_conversion { + llvm_link.sanitise_uints(); + } } if let Some(predicate_form) = ctx.local.predicate_form() {