pub struct I32x8(/* private fields */);
[i32; 8] as a vector.
Implementations

impl I32x8
pub const fn from_array(arr: [i32; 8]) -> Self

Create a vector from an array.
Unlike the From trait function, from_array is const, so it can be used in const contexts.
§Example
const MY_EXTREMELY_FUN_VALUE: I32x8 = I32x8::from_array([0, 1, 2, 3, 4, 5, 6, 7]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
    assert_eq!(i as i32, value);
}

Trait Implementations
impl Add for I32x8

fn add(self, rhs: I32x8) -> I32x8

Performs a pairwise wrapping_add.
§Scalar Equivalent:
I32x8::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
])

§AVX2 Intrinsics Used
_mm256_add_epi32: VPADDD ymm, ymm, ymm
§Neon Intrinsics Used
vaddq_s32
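For illustration, a minimal usage sketch of the wrapping behaviour at the value boundary; it assumes only the from_array and as_array accessors shown elsewhere on this page:

// Sketch: i32::MAX wraps around to i32::MIN under wrapping_add, lane by lane.
let a = I32x8::from_array([i32::MAX, 1, 2, 3, 4, 5, 6, 7]);
let b = I32x8::from_array([1; 8]);
let sum = a + b;
assert_eq!(sum.as_array()[0], i32::MIN); // wrapped lane
assert_eq!(sum.as_array()[1], 2);        // ordinary lane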
impl AddAssign for I32x8

fn add_assign(&mut self, other: I32x8)

Performs the += operation.

impl BitAnd for I32x8
fn bitand(self, rhs: I32x8) -> I32x8

Performs a pairwise bitwise AND.
§Scalar Equivalent:
I32x8::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
])

§AVX2 Intrinsics Used
_mm256_and_si256: VPAND ymm, ymm, ymm
§Neon Intrinsics Used
vandq_s32
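As a usage sketch (assuming the same from_array/as_array API as above), the bitwise operators are handy for lane-wise masking:

// Sketch: keep only the low byte of each lane.
let values = I32x8::from_array([0x1234, 0x5678, 0x9ABC, 0xDEF0, 0x1111, 0x2222, 0x3333, 0x4444]);
let mask = I32x8::from_array([0xFF; 8]);
let low_bytes = values & mask;
assert_eq!(low_bytes.as_array()[0], 0x34);
assert_eq!(low_bytes.as_array()[1], 0x78);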
impl BitAndAssign for I32x8

fn bitand_assign(&mut self, other: I32x8)

Performs the &= operation.

impl BitOr for I32x8
fn bitor(self, rhs: I32x8) -> I32x8

Performs a pairwise bitwise OR.
§Scalar Equivalent:
I32x8::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
])

§AVX2 Intrinsics Used
_mm256_or_si256: VPOR ymm, ymm, ymm
§Neon Intrinsics Used
vorrq_s32
impl BitOrAssign for I32x8

fn bitor_assign(&mut self, other: I32x8)

Performs the |= operation.

impl BitXor for I32x8
fn bitxor(self, rhs: I32x8) -> I32x8

Performs a pairwise bitwise XOR.
§Scalar Equivalent:
I32x8::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
])

§AVX2 Intrinsics Used
_mm256_xor_si256: VPXOR ymm, ymm, ymm
§Neon Intrinsics Used
veorq_s32
impl BitXorAssign for I32x8

fn bitxor_assign(&mut self, other: I32x8)

Performs the ^= operation.

impl ConditionallySelectable for I32x8
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self

fn conditional_assign(&mut self, other: &Self, choice: Choice)

fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)

Conditionally swaps a and b if choice == 1; otherwise, reassigns both to themselves.
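A small sketch of constant-time selection, assuming the subtle crate's Choice type and that ConditionallySelectable is in scope:

// Sketch: Choice::from(1u8) picks b, Choice::from(0u8) picks a.
use subtle::{Choice, ConditionallySelectable};
let a = I32x8::from_array([0; 8]);
let b = I32x8::from_array([1; 8]);
let picked = I32x8::conditional_select(&a, &b, Choice::from(1u8));
assert_eq!(picked.as_array(), [1; 8]);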
impl ConstantTimeEq for I32x8
impl<'de> Deserialize<'de> for I32x8

fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,

Deserialize this value from the given Serde deserializer.
impl Distribution<I32x8> for Standard
impl ExtendingCast<I16x8> for I32x8

fn extending_cast_from(vector: I16x8) -> I32x8
§Scalar Equivalent:
I32x8::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
i32::from(vector.as_array()[4]),
i32::from(vector.as_array()[5]),
i32::from(vector.as_array()[6]),
i32::from(vector.as_array()[7]),
])

§Avx2
- VPMOVSXWD ymm, xmm
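A brief widening sketch; it assumes I16x8 offers the same from_array constructor as I32x8 and that the ExtendingCast trait is in scope:

// Sketch: each i16 lane is sign-extended to an i32 lane.
let narrow = I16x8::from_array([-1, 2, -3, 4, -5, 6, -7, 8]);
let wide = I32x8::extending_cast_from(narrow);
assert_eq!(wide.as_array(), [-1, 2, -3, 4, -5, 6, -7, 8]);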
impl ExtendingCast<I8x16> for I32x8

fn extending_cast_from(vector: I8x16) -> I32x8
§Scalar Equivalent:
I32x8::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
i32::from(vector.as_array()[4]),
i32::from(vector.as_array()[5]),
i32::from(vector.as_array()[6]),
i32::from(vector.as_array()[7]),
])

§Avx2
- VPMOVSXBD ymm, xmm
impl From<I16x8> for I32x8

fn from(vector: I16x8) -> I32x8
§Scalar Equivalent:
I32x8::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
i32::from(vector.as_array()[4]),
i32::from(vector.as_array()[5]),
i32::from(vector.as_array()[6]),
i32::from(vector.as_array()[7]),
])

§Avx2
- VPMOVSXWD ymm, xmm
impl From<I32x4> for I32x8

fn from(vector: I32x4) -> I32x8

impl Shl<u64> for I32x8

fn shl(self, amount: u64) -> I32x8
§Scalar Equivalent:
if amount >= 32 {
I32x8::ZERO
} else {
I32x8::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
])
}

§Avx2
- VPSLLD ymm, ymm, xmm
- Instruction sequence.
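A usage sketch of the saturating shift count, assuming the from_array/as_array accessors as above; in-range shifts behave like the scalar operator, while shifting by 32 or more simply clears every lane:

// Sketch: shift amounts of 32 or more yield an all-zero vector.
let v = I32x8::from_array([1, 2, 3, 4, 5, 6, 7, 8]);
assert_eq!((v << 1u64).as_array(), [2, 4, 6, 8, 10, 12, 14, 16]);
assert_eq!((v << 40u64).as_array(), [0; 8]);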
impl Shl for I32x8

impl ShlAssign<u64> for I32x8

fn shl_assign(&mut self, amount: u64)

Performs the <<= operation.

impl ShlAssign for I32x8

fn shl_assign(&mut self, amount: I32x8)

Performs the <<= operation.

impl Shr<u64> for I32x8
fn shr(self, amount: u64) -> I32x8
§Scalar Equivalent:
if amount >= 32 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I32x8::from(out)
} else {
I32x8::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
])
}

§Avx2
- VPSRAD ymm, ymm, xmm
- Instruction sequence.
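A matching sketch for the arithmetic right shift, under the same assumptions; negative lanes keep their sign, and oversized shift amounts saturate each lane to its sign:

// Sketch: arithmetic shift preserves sign; shifts of 32 or more fill lanes with the sign bit.
let v = I32x8::from_array([-8, 8, -1, 1, i32::MIN, i32::MAX, 0, -2]);
assert_eq!((v >> 1u64).as_array()[0], -4);
assert_eq!((v >> 40u64).as_array()[0], -1); // negative lane becomes all ones
assert_eq!((v >> 40u64).as_array()[1], 0);  // non-negative lane becomes zero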
impl Shr for I32x8

impl ShrAssign<u64> for I32x8

fn shr_assign(&mut self, amount: u64)

Performs the >>= operation.

impl ShrAssign for I32x8

fn shr_assign(&mut self, amount: I32x8)

Performs the >>= operation.

impl SimdBase for I32x8
fn is_zero(&self) -> bool

fn set_lo(scalar: i32) -> I32x8

fn broadcast_lo(vector: I32x4) -> I32x8

fn cmp_eq(&self, other: I32x8) -> I32x8
§Scalar Equivalent:
I32x8::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] == other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] == other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] == other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] == other.as_array()[7] { -1 } else { 0 },
])

§Avx2
- VPCMPEQD ymm, ymm, ymm
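A comparison sketch, assuming the SimdBase trait is in scope; equal lanes produce an all-ones mask (-1) and unequal lanes produce zero:

// Sketch: lane-wise equality produces a mask usable with the bitwise operators above.
let a = I32x8::from_array([1, 2, 3, 4, 5, 6, 7, 8]);
let b = I32x8::from_array([1, 0, 3, 0, 5, 0, 7, 0]);
assert_eq!(a.cmp_eq(b).as_array(), [-1, 0, -1, 0, -1, 0, -1, 0]);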
fn and_not(&self, other: I32x8) -> I32x8
§Scalar Equivalent:
I32x8::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
])

§Avx2
- VPANDN ymm, ymm, ymm
fn cmp_gt(&self, other: I32x8) -> I32x8
§Scalar Equivalent:
I32x8::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] > other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] > other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] > other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] > other.as_array()[7] { -1 } else { 0 },
])

§Avx2
- VPCMPGTD ymm, ymm, ymm
fn shift_left<const BITS: usize>(&self) -> I32x8

fn shift_right<const BITS: usize>(&self) -> I32x8

fn unpack_lo(&self, other: I32x8) -> I32x8

fn unpack_hi(&self, other: I32x8) -> I32x8

fn max(&self, other: I32x8) -> I32x8
§Scalar Equivalent:
I32x8::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
])

§Avx2
- VPMAXSD ymm, ymm, ymm
fn min(&self, other: I32x8) -> I32x8
§Scalar Equivalent:
I32x8::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
])

§Avx2
- VPMINSD ymm, ymm, ymm
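As a usage sketch (SimdBase in scope assumed), min and max compose into a lane-wise clamp:

// Sketch: restrict every lane to the range [0, 100].
let v = I32x8::from_array([-5, 50, 500, 0, 100, 101, -1, 99]);
let clamped = v.max(I32x8::from_array([0; 8])).min(I32x8::from_array([100; 8]));
assert_eq!(clamped.as_array(), [0, 50, 100, 0, 100, 100, 0, 99]);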
type BroadcastLoInput = I32x4

A vector of [Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())].

impl SimdBase32 for I32x8
fn shuffle<const I3: usize, const I2: usize, const I1: usize, const I0: usize>(&self) -> I32x8
§Scalar Equivalent:
I32x8::from([
// 128-bit Lane #0
self.as_array()[I0 + 0 * 4],
self.as_array()[I1 + 0 * 4],
self.as_array()[I2 + 0 * 4],
self.as_array()[I3 + 0 * 4],
// 128-bit Lane #1
self.as_array()[I0 + 1 * 4],
self.as_array()[I1 + 1 * 4],
self.as_array()[I2 + 1 * 4],
self.as_array()[I3 + 1 * 4],
])

§Avx2
- VPSHUFD ymm, ymm, imm8
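A shuffle sketch derived from the scalar equivalent above, assuming SimdBase32 is in scope; note the const parameters are ordered I3, I2, I1, I0, so I0 selects output lane 0 within each 128-bit lane:

// Sketch: reverse the four elements within each 128-bit lane.
let v = I32x8::from_array([0, 1, 2, 3, 4, 5, 6, 7]);
assert_eq!(v.shuffle::<0, 1, 2, 3>().as_array(), [3, 2, 1, 0, 7, 6, 5, 4]);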
impl SimdBase8x for I32x8

fn blend<const B7: bool, const B6: bool, const B5: bool, const B4: bool, const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: I32x8,
) -> I32x8
§Scalar Equivalent:
I32x8::from([
(if B0 { if_true } else { *self }).as_array()[0],
(if B1 { if_true } else { *self }).as_array()[1],
(if B2 { if_true } else { *self }).as_array()[2],
(if B3 { if_true } else { *self }).as_array()[3],
(if B4 { if_true } else { *self }).as_array()[4],
(if B5 { if_true } else { *self }).as_array()[5],
(if B6 { if_true } else { *self }).as_array()[6],
(if B7 { if_true } else { *self }).as_array()[7],
])

§Avx2
- VPBLENDD ymm, ymm, ymm, imm8
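A blend sketch derived from the scalar equivalent above, assuming SimdBase8x is in scope; the const parameters run from B7 down to B0, so B0 controls lane 0:

// Sketch: take lanes 0 and 7 from `ones`, everything else from `zeros`.
let zeros = I32x8::from_array([0; 8]);
let ones = I32x8::from_array([1; 8]);
let mixed = zeros.blend::<true, false, false, false, false, false, false, true>(ones);
assert_eq!(mixed.as_array(), [1, 0, 0, 0, 0, 0, 0, 1]);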
impl SimdBaseGatherable<I32x8> for I32x8

§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.

unsafe fn gather(base: *const i32, indices: I32x8) -> I32x8
§Scalar Equivalent:
I32x8::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
base.offset(indices.as_array()[4] as isize).read_unaligned(),
base.offset(indices.as_array()[5] as isize).read_unaligned(),
base.offset(indices.as_array()[6] as isize).read_unaligned(),
base.offset(indices.as_array()[7] as isize).read_unaligned(),
])

§Avx2
- VPGATHERDD ymm, vm32x, ymm
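A gather sketch, assuming SimdBaseGatherable is in scope; the caller must uphold the safety requirement above, which here holds because every index stays inside data:

// Sketch: load every other element of a slice.
let data: [i32; 16] = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150];
let indices = I32x8::from_array([0, 2, 4, 6, 8, 10, 12, 14]);
let gathered = unsafe { I32x8::gather(data.as_ptr(), indices) };
assert_eq!(gathered.as_array(), [0, 20, 40, 60, 80, 100, 120, 140]);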
unsafe fn gather_masked(
    base: *const i32,
    indices: I32x8,
    mask: I32x8,
    src: I32x8,
) -> I32x8
§Scalar Equivalent:
I32x8::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
if ((mask.as_array()[4] as u32) >> 31) == 1 {
base.offset(indices.as_array()[4] as isize).read_unaligned()
} else {
src.as_array()[4]
},
if ((mask.as_array()[5] as u32) >> 31) == 1 {
base.offset(indices.as_array()[5] as isize).read_unaligned()
} else {
src.as_array()[5]
},
if ((mask.as_array()[6] as u32) >> 31) == 1 {
base.offset(indices.as_array()[6] as isize).read_unaligned()
} else {
src.as_array()[6]
},
if ((mask.as_array()[7] as u32) >> 31) == 1 {
base.offset(indices.as_array()[7] as isize).read_unaligned()
} else {
src.as_array()[7]
},
])

§Avx2
- VPGATHERDD ymm, vm32x, ymm
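A masked-gather sketch under the same assumptions; lanes whose mask has the top bit set load from memory, and the rest fall back to src:

// Sketch: the first four lanes gather from `data`; the masked-off lanes keep `src`.
let data: [i32; 8] = [0, 10, 20, 30, 40, 50, 60, 70];
let indices = I32x8::from_array([0, 1, 2, 3, 0, 0, 0, 0]);
let mask = I32x8::from_array([-1, -1, -1, -1, 0, 0, 0, 0]); // -1 has the top bit set
let src = I32x8::from_array([7; 8]);
let v = unsafe { I32x8::gather_masked(data.as_ptr(), indices, mask, src) };
assert_eq!(v.as_array(), [0, 10, 20, 30, 7, 7, 7, 7]);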
impl SimdBaseGatherable<I32x8> for U32x8

§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.

unsafe fn gather(base: *const u32, indices: I32x8) -> U32x8
§Scalar Equivalent:
U32x8::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
base.offset(indices.as_array()[4] as isize).read_unaligned(),
base.offset(indices.as_array()[5] as isize).read_unaligned(),
base.offset(indices.as_array()[6] as isize).read_unaligned(),
base.offset(indices.as_array()[7] as isize).read_unaligned(),
])

§Avx2
- VPGATHERDD ymm, vm32x, ymm
unsafe fn gather_masked(
    base: *const u32,
    indices: I32x8,
    mask: U32x8,
    src: U32x8,
) -> U32x8
§Scalar Equivalent:
U32x8::from([
if (mask.as_array()[0] >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
if (mask.as_array()[4] >> 31) == 1 {
base.offset(indices.as_array()[4] as isize).read_unaligned()
} else {
src.as_array()[4]
},
if (mask.as_array()[5] >> 31) == 1 {
base.offset(indices.as_array()[5] as isize).read_unaligned()
} else {
src.as_array()[5]
},
if (mask.as_array()[6] >> 31) == 1 {
base.offset(indices.as_array()[6] as isize).read_unaligned()
} else {
src.as_array()[6]
},
if (mask.as_array()[7] >> 31) == 1 {
base.offset(indices.as_array()[7] as isize).read_unaligned()
} else {
src.as_array()[7]
},
])

§Avx2
- VPGATHERDD ymm, vm32x, ymm
impl Sub for I32x8

fn sub(self, rhs: I32x8) -> I32x8

Performs a pairwise wrapping_sub.
§Scalar Equivalent:
I32x8::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
])

§AVX2 Intrinsics Used
_mm256_sub_epi32: VPSUBD ymm, ymm, ymm
§Neon Intrinsics Used
vsubq_s32
impl SubAssign for I32x8

fn sub_assign(&mut self, other: I32x8)

Performs the -= operation.

impl Copy for I32x8
impl Eq for I32x8
impl Pod for I32x8
Auto Trait Implementations
impl Freeze for I32x8
impl RefUnwindSafe for I32x8
impl Send for I32x8
impl Sync for I32x8
impl Unpin for I32x8
impl UnwindSafe for I32x8
Blanket Implementations

impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,

type Bits = T

Self must have the same layout as the specified Bits, except for the possible
invalid bit patterns being checked during is_valid_bit_pattern.

fn is_valid_bit_pattern(_bits: &T) -> bool

If this function returns true, then it must be valid to reinterpret bits as &Self.