Generate constant Neon vectors within a const block #1503

Merged · 2 commits · Aug 21, 2024
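
Every hunk in generated.rs below applies the same mechanical change: the shift-amount vector splatted from the const generic N, previously constructed at run time, is wrapped in an inline const block so it is evaluated at compile time. A minimal sketch of the before/after pattern, assuming an illustrative stand-in struct (Int16x8 here is hypothetical, not the real core::arch int16x8_t):

// Sketch of the pattern this PR applies; `Int16x8` is an illustrative
// stand-in, not the real core::arch::aarch64::int16x8_t.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Int16x8([i16; 8]);

// Before: the splat of -N is built at run time, so unoptimized (debug)
// builds hand the backend a non-constant shift vector.
fn splat_runtime<const N: i32>() -> Int16x8 {
    Int16x8([-N as i16; 8])
}

// After: an inline `const` block (stable since Rust 1.79) evaluates the
// splat at compile time, so the intrinsic always receives a constant.
fn splat_const<const N: i32>() -> Int16x8 {
    const { Int16x8([-N as i16; 8]) }
}

fn main() {
    assert_eq!(splat_runtime::<2>(), splat_const::<2>());
}

The practical effect is that the LLVM backend always sees a true constant shift vector, even in debug builds where nothing would otherwise fold the run-time splat; this is what re-enables the intrinsics removed from the skip lists at the end of the diff.
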
74 changes: 37 additions & 37 deletions crates/core_arch/src/arm_shared/neon/generated.rs
@@ -21990,7 +21990,7 @@ pub unsafe fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")]
fn vqrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
-vqrshrn_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
+vqrshrn_n_s16_(a, const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) })
}

/// Signed saturating rounded shift right narrow
@@ -22028,7 +22028,7 @@ pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")]
fn vqrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
-vqrshrn_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
+vqrshrn_n_s32_(a, const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) })
}

/// Signed saturating rounded shift right narrow
@@ -22066,7 +22066,7 @@ pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")]
fn vqrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
-vqrshrn_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
+vqrshrn_n_s64_(a, const { int64x2_t([-N as i64, -N as i64]) })
}

/// Signed saturating rounded shift right narrow
@@ -22104,7 +22104,7 @@ pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")]
fn vqrshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
}
-vqrshrn_n_u16_(a, uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]))
+vqrshrn_n_u16_(a, const { uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) })
}

/// Unsigned signed saturating rounded shift right narrow
@@ -22142,7 +22142,7 @@ pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")]
fn vqrshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
}
-vqrshrn_n_u32_(a, uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]))
+vqrshrn_n_u32_(a, const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) })
}

/// Unsigned signed saturating rounded shift right narrow
@@ -22180,7 +22180,7 @@ pub unsafe fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")]
fn vqrshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t;
}
-vqrshrn_n_u64_(a, uint64x2_t([-N as u64, -N as u64]))
+vqrshrn_n_u64_(a, const { uint64x2_t([-N as u64, -N as u64]) })
}

/// Unsigned signed saturating rounded shift right narrow
@@ -22218,7 +22218,7 @@ pub unsafe fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")]
fn vqrshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
}
-vqrshrun_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
+vqrshrun_n_s16_(a, const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) })
}

/// Signed saturating rounded shift right unsigned narrow
@@ -22256,7 +22256,7 @@ pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")]
fn vqrshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
}
-vqrshrun_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
+vqrshrun_n_s32_(a, const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) })
}

/// Signed saturating rounded shift right unsigned narrow
@@ -22294,7 +22294,7 @@ pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")]
fn vqrshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
}
-vqrshrun_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
+vqrshrun_n_s64_(a, const { int64x2_t([-N as i64, -N as i64]) })
}

/// Signed saturating rounded shift right unsigned narrow
@@ -22908,7 +22908,7 @@ pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")]
fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
}
-vqshlu_n_s8_(a, int8x8_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]))
+vqshlu_n_s8_(a, const { int8x8_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]) })
}

/// Signed saturating shift left unsigned
@@ -22927,7 +22927,7 @@ pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i8")]
fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
}
-vqshlu_n_s8_(a, int8x8_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]))
+vqshlu_n_s8_(a, const { int8x8_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]) })
}

/// Signed saturating shift left unsigned
@@ -22946,7 +22946,7 @@ pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")]
fn vqshlu_n_s16_(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
}
-vqshlu_n_s16_(a, int16x4_t([N as i16, N as i16, N as i16, N as i16]))
+vqshlu_n_s16_(a, const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) })
}

/// Signed saturating shift left unsigned
@@ -22965,7 +22965,7 @@ pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i16")]
fn vqshlu_n_s16_(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
}
-vqshlu_n_s16_(a, int16x4_t([N as i16, N as i16, N as i16, N as i16]))
+vqshlu_n_s16_(a, const { int16x4_t([N as i16, N as i16, N as i16, N as i16]) })
}

/// Signed saturating shift left unsigned
@@ -22984,7 +22984,7 @@ pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")]
fn vqshlu_n_s32_(a: int32x2_t, n: int32x2_t) -> uint32x2_t;
}
-vqshlu_n_s32_(a, int32x2_t([N as i32, N as i32]))
+vqshlu_n_s32_(a, const { int32x2_t([N as i32, N as i32]) })
}

/// Signed saturating shift left unsigned
@@ -23003,7 +23003,7 @@ pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i32")]
fn vqshlu_n_s32_(a: int32x2_t, n: int32x2_t) -> uint32x2_t;
}
-vqshlu_n_s32_(a, int32x2_t([N as i32, N as i32]))
+vqshlu_n_s32_(a, const { int32x2_t([N as i32, N as i32]) })
}

/// Signed saturating shift left unsigned
@@ -23022,7 +23022,7 @@ pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")]
fn vqshlu_n_s64_(a: int64x1_t, n: int64x1_t) -> uint64x1_t;
}
-vqshlu_n_s64_(a, int64x1_t([N as i64]))
+vqshlu_n_s64_(a, const { int64x1_t([N as i64]) })
}

/// Signed saturating shift left unsigned
@@ -23041,7 +23041,7 @@ pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v1i64")]
fn vqshlu_n_s64_(a: int64x1_t, n: int64x1_t) -> uint64x1_t;
}
-vqshlu_n_s64_(a, int64x1_t([N as i64]))
+vqshlu_n_s64_(a, const { int64x1_t([N as i64]) })
}

/// Signed saturating shift left unsigned
@@ -23060,7 +23060,7 @@ pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")]
fn vqshluq_n_s8_(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
}
-vqshluq_n_s8_(a, int8x16_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]))
+vqshluq_n_s8_(a, const { int8x16_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]) })
}

/// Signed saturating shift left unsigned
@@ -23079,7 +23079,7 @@ pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v16i8")]
fn vqshluq_n_s8_(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
}
-vqshluq_n_s8_(a, int8x16_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]))
+vqshluq_n_s8_(a, const { int8x16_t([N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8]) })
}

/// Signed saturating shift left unsigned
@@ -23098,7 +23098,7 @@ pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")]
fn vqshluq_n_s16_(a: int16x8_t, n: int16x8_t) -> uint16x8_t;
}
-vqshluq_n_s16_(a, int16x8_t([N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16]))
+vqshluq_n_s16_(a, const { int16x8_t([N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16]) })
}

/// Signed saturating shift left unsigned
@@ -23117,7 +23117,7 @@ pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v8i16")]
fn vqshluq_n_s16_(a: int16x8_t, n: int16x8_t) -> uint16x8_t;
}
-vqshluq_n_s16_(a, int16x8_t([N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16]))
+vqshluq_n_s16_(a, const { int16x8_t([N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16]) })
}

/// Signed saturating shift left unsigned
@@ -23136,7 +23136,7 @@ pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")]
fn vqshluq_n_s32_(a: int32x4_t, n: int32x4_t) -> uint32x4_t;
}
-vqshluq_n_s32_(a, int32x4_t([N as i32, N as i32, N as i32, N as i32]))
+vqshluq_n_s32_(a, const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) })
}

/// Signed saturating shift left unsigned
@@ -23155,7 +23155,7 @@ pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v4i32")]
fn vqshluq_n_s32_(a: int32x4_t, n: int32x4_t) -> uint32x4_t;
}
-vqshluq_n_s32_(a, int32x4_t([N as i32, N as i32, N as i32, N as i32]))
+vqshluq_n_s32_(a, const { int32x4_t([N as i32, N as i32, N as i32, N as i32]) })
}

/// Signed saturating shift left unsigned
@@ -23174,7 +23174,7 @@ pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")]
fn vqshluq_n_s64_(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
}
-vqshluq_n_s64_(a, int64x2_t([N as i64, N as i64]))
+vqshluq_n_s64_(a, const { int64x2_t([N as i64, N as i64]) })
}

/// Signed saturating shift left unsigned
@@ -23193,7 +23193,7 @@ pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
#[cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), link_name = "llvm.aarch64.neon.sqshlu.v2i64")]
fn vqshluq_n_s64_(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
}
-vqshluq_n_s64_(a, int64x2_t([N as i64, N as i64]))
+vqshluq_n_s64_(a, const { int64x2_t([N as i64, N as i64]) })
}

/// Signed saturating shift right narrow
@@ -23212,7 +23212,7 @@ pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")]
fn vqshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
-vqshrn_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
+vqshrn_n_s16_(a, const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) })
}

/// Signed saturating shift right narrow
@@ -23250,7 +23250,7 @@ pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")]
fn vqshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
-vqshrn_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
+vqshrn_n_s32_(a, const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) })
}

/// Signed saturating shift right narrow
@@ -23288,7 +23288,7 @@ pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")]
fn vqshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
-vqshrn_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
+vqshrn_n_s64_(a, const { int64x2_t([-N as i64, -N as i64]) })
}

/// Signed saturating shift right narrow
@@ -23326,7 +23326,7 @@ pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")]
fn vqshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
}
-vqshrn_n_u16_(a, uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]))
+vqshrn_n_u16_(a, const { uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) })
}

/// Unsigned saturating shift right narrow
@@ -23364,7 +23364,7 @@ pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")]
fn vqshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
}
-vqshrn_n_u32_(a, uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]))
+vqshrn_n_u32_(a, const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) })
}

/// Unsigned saturating shift right narrow
@@ -23402,7 +23402,7 @@ pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")]
fn vqshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t;
}
-vqshrn_n_u64_(a, uint64x2_t([-N as u64, -N as u64]))
+vqshrn_n_u64_(a, const { uint64x2_t([-N as u64, -N as u64]) })
}

/// Unsigned saturating shift right narrow
@@ -23440,7 +23440,7 @@ pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")]
fn vqshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
}
-vqshrun_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
+vqshrun_n_s16_(a, const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) })
}

/// Signed saturating shift right unsigned narrow
@@ -23478,7 +23478,7 @@ pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")]
fn vqshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
}
-vqshrun_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
+vqshrun_n_s32_(a, const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) })
}

/// Signed saturating shift right unsigned narrow
@@ -23516,7 +23516,7 @@ pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")]
fn vqshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
}
-vqshrun_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
+vqshrun_n_s64_(a, const { int64x2_t([-N as i64, -N as i64]) })
}

/// Signed saturating shift right unsigned narrow
@@ -28234,7 +28234,7 @@ pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")]
fn vrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
-vrshrn_n_s16_(a, int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]))
+vrshrn_n_s16_(a, const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) })
}

/// Rounding shift right narrow
@@ -28272,7 +28272,7 @@ pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")]
fn vrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
-vrshrn_n_s32_(a, int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]))
+vrshrn_n_s32_(a, const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) })
}

/// Rounding shift right narrow
@@ -28310,7 +28310,7 @@ pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")]
fn vrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
-vrshrn_n_s64_(a, int64x2_t([-N as i64, -N as i64]))
+vrshrn_n_s64_(a, const { int64x2_t([-N as i64, -N as i64]) })
}

/// Rounding shift right narrow
17 changes: 1 addition & 16 deletions crates/intrinsic-test/missing_aarch64.txt
@@ -23,19 +23,4 @@ vrnd64zq_f64
vrnd32x_f64
vrnd32z_f64
vrnd64x_f64
-vrnd64z_f64
-
-# LLVM select error in debug builds
-#vqshlu_n_s16
-#vqshlu_n_s32
-#vqshlu_n_s64
-#vqshlu_n_s8
-#vqshlub_n_s8
-#vqshlud_n_s64
-#vqshluh_n_s16
-#vqshluq_n_s16
-#vqshluq_n_s32
-#vqshluq_n_s64
-#vqshluq_n_s8
-#vqshlus_n_s32
-
+vrnd64z_f64
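
These entries had been commented out of the test list because the run-time-built shift vectors triggered an LLVM select error in debug builds; with the constants now produced in const blocks, the intrinsics can be exercised again. For orientation, a hedged usage sketch of one re-enabled intrinsic (aarch64 only; the values are illustrative):

// Usage sketch for one of the re-enabled intrinsics (aarch64 only).
#[cfg(target_arch = "aarch64")]
fn demo() {
    use core::arch::aarch64::{vdup_n_s8, vget_lane_u8, vqshlu_n_s8};
    // SAFETY: NEON is a baseline feature on aarch64 targets.
    unsafe {
        let a = vdup_n_s8(3);          // every lane = 3
        let r = vqshlu_n_s8::<2>(a);   // saturating unsigned left shift: 3 << 2 = 12
        assert_eq!(vget_lane_u8::<0>(r), 12);
    }
}

fn main() {
    #[cfg(target_arch = "aarch64")]
    demo();
}
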
30 changes: 1 addition & 29 deletions crates/intrinsic-test/missing_arm.txt
@@ -182,32 +182,4 @@ vrndpq_f32
vrndq_f32
vrndq_f32
vrndx_f32
-vrndxq_f32
-
-# LLVM select error in debug builds
-#vqrshrn_n_s16
-#vqrshrn_n_s32
-#vqrshrn_n_s64
-#vqrshrn_n_u16
-#vqrshrn_n_u32
-#vqrshrn_n_u64
-#vqrshrun_n_s16
-#vqrshrun_n_s32
-#vqrshrun_n_s64
-#vqshrn_n_s16
-#vqshrn_n_s32
-#vqshrn_n_s64
-#vqshrn_n_u16
-#vqshrn_n_u32
-#vqshrn_n_u64
-#vqshrun_n_s16
-#vqshrun_n_s32
-#vqshrun_n_s64
-#vrshrn_n_s16
-#vrshrn_n_s32
-#vrshrn_n_s64
-#vrshrn_n_u16
-#vrshrn_n_u32
-#vrshrn_n_u64
-#vshrq_n_u64
-#vshr_n_u64
+vrndxq_f32