pub struct I32x4(/* private fields */);
[i32; 4] as a vector.
Implementations§
impl I32x4
pub const fn from_array(arr: [i32; 4]) -> Self
Create a vector from an array.
Unlike the From trait function, from_array is const.
§Example
const MY_EXTREMELY_FUN_VALUE: I32x4 = I32x4::from_array([0, 1, 2, 3]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as i32, value);
}
Trait Implementations§
impl Add for I32x4
fn add(self, rhs: I32x4) -> I32x4
Perform a pairwise wrapping_add
§Scalar Equivalent
I32x4::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
])
§AVX2 Intrinsics Used
_mm_add_epi32 (PADDD xmm, xmm)
§Neon Intrinsics Used
vaddq_s32
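For instance (a minimal sketch, assuming I32x4 is in scope; as documented above, addition wraps per lane):
let a = I32x4::from([1, 2, 3, i32::MAX]);
let b = I32x4::from([10, 20, 30, 1]);
// Each lane is added independently with wrapping semantics.
assert_eq!(a + b, I32x4::from([11, 22, 33, i32::MIN]));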
impl AddAssign for I32x4
fn add_assign(&mut self, other: I32x4)
Performs the += operation.
impl BitAnd for I32x4
fn bitand(self, rhs: I32x4) -> I32x4
Perform a pairwise bitwise and
§Scalar Equivalent
I32x4::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
])
§AVX2 Intrinsics Used
_mm_and_si128 (PAND xmm, xmm)
§Neon Intrinsics Used
vandq_s32
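A brief usage sketch (assuming I32x4 is in scope):
let a = I32x4::from([0b1100, 0b1010, -1, 0]);
let b = I32x4::from([0b1010, 0b0110, 0x0f, 123]);
// Each lane is AND-ed independently.
assert_eq!(a & b, I32x4::from([0b1000, 0b0010, 0x0f, 0]));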
impl BitAndAssign for I32x4
fn bitand_assign(&mut self, other: I32x4)
Performs the &= operation.
impl BitOr for I32x4
fn bitor(self, rhs: I32x4) -> I32x4
Perform a pairwise bitwise or
§Scalar Equivalent
I32x4::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
])
§AVX2 Intrinsics Used
_mm_or_si128 (POR xmm, xmm)
§Neon Intrinsics Used
vorrq_s32
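A brief usage sketch (assuming I32x4 is in scope):
let a = I32x4::from([0b1100, 0, -1, 0x00ff]);
let b = I32x4::from([0b0011, 0, 0, 0xff00]);
// Each lane is OR-ed independently.
assert_eq!(a | b, I32x4::from([0b1111, 0, -1, 0xffff]));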
impl BitOrAssign for I32x4
fn bitor_assign(&mut self, other: I32x4)
Performs the |= operation.
impl BitXor for I32x4
fn bitxor(self, rhs: I32x4) -> I32x4
Perform a pairwise bitwise xor
§Scalar Equivalent
I32x4::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
])
§AVX2 Intrinsics Used
_mm_xor_si128 (PXOR xmm, xmm)
§Neon Intrinsics Used
veorq_s32
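A brief usage sketch (assuming I32x4 is in scope):
let a = I32x4::from([0b1100, 7, -1, 42]);
let b = I32x4::from([0b1010, 7, 0, 0]);
// Each lane is XOR-ed independently; x ^ x == 0 and x ^ 0 == x.
assert_eq!(a ^ b, I32x4::from([0b0110, 0, -1, 42]));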
impl BitXorAssign for I32x4
fn bitxor_assign(&mut self, other: I32x4)
Performs the ^= operation.
impl ConditionallySelectable for I32x4
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap self and other if choice == 1; otherwise,
reassign both unto themselves.
impl ConstantTimeEq for I32x4
impl<'de> Deserialize<'de> for I32x4
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,
impl Distribution<I32x4> for Standard
impl ExtendingCast<I16x8> for I32x4
impl ExtendingCast<I32x4> for I64x2
impl ExtendingCast<I32x4> for I64x4
impl ExtendingCast<I8x16> for I32x4
impl From<I32x4> for I32x8
fn from(vector: I32x4) -> I32x8
impl Shl<u64> for I32x4
impl Shl for I32x4
impl ShlAssign<u64> for I32x4
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl ShlAssign for I32x4
fn shl_assign(&mut self, amount: I32x4)
Performs the <<= operation.
impl Shr<u64> for I32x4
fn shr(self, amount: u64) -> I32x4
§Scalar Equivalent:
if amount >= 32 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I32x4::from(out)
} else {
I32x4::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
])
}
§Avx2
- Instruction sequence.
- PSRAD xmm, xmm
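A brief sketch of the arithmetic-shift behavior above (assuming I32x4 is in scope):
let v = I32x4::from([-8, 8, -1, 1]);
// The sign bit is replicated into the vacated high bits.
assert_eq!(v >> 1u64, I32x4::from([-4, 4, -1, 0]));
// Shifting by 32 or more fills each lane with its sign (-1 or 0).
assert_eq!(v >> 32u64, I32x4::from([-1, 0, -1, 0]));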
impl Shr for I32x4
impl ShrAssign<u64> for I32x4
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl ShrAssign for I32x4
fn shr_assign(&mut self, amount: I32x4)
Performs the >>= operation.
impl SimdBase for I32x4
fn set_lo(scalar: i32) -> I32x4
fn broadcast_lo(vector: I32x4) -> I32x4
fn cmp_eq(&self, other: I32x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
])
§Avx2
- PCMPEQD xmm, xmm
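A brief sketch (assuming I32x4 and the SimdBase trait are in scope):
let a = I32x4::from([1, 2, 3, 4]);
let b = I32x4::from([1, 0, 3, 0]);
// Equal lanes become all-ones (-1); unequal lanes become 0.
assert_eq!(a.cmp_eq(b), I32x4::from([-1, 0, -1, 0]));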
fn and_not(&self, other: I32x4) -> I32x4
fn cmp_gt(&self, other: I32x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
])
§Avx2
- PCMPGTD xmm, xmm
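A brief sketch (assuming I32x4 and the SimdBase trait are in scope):
let a = I32x4::from([5, -1, 3, 0]);
let b = I32x4::from([1, 0, 3, -7]);
// Signed greater-than per lane: true lanes become -1, false lanes 0.
assert_eq!(a.cmp_gt(b), I32x4::from([-1, 0, 0, -1]));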
fn shift_left<const BITS: usize>(&self) -> I32x4
fn shift_right<const BITS: usize>(&self) -> I32x4
fn unpack_lo(&self, other: I32x4) -> I32x4
fn unpack_hi(&self, other: I32x4) -> I32x4
fn max(&self, other: I32x4) -> I32x4
fn min(&self, other: I32x4) -> I32x4
type BroadcastLoInput = I32x4
[Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())]
impl SimdBase32 for I32x4
fn shuffle<const I3: usize, const I2: usize, const I1: usize, const I0: usize>(&self) -> I32x4
impl SimdBase4x for I32x4
fn blend<const B3: bool, const B2: bool, const B1: bool, const B0: bool>(&self, if_true: I32x4) -> I32x4
impl SimdBaseGatherable<I32x4> for I32x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: I32x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERDD xmm, vm32x, xmm
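A brief sketch (assuming I32x4 and the SimdBaseGatherable trait are in scope):
let table: [i32; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
let indices = I32x4::from([7, 0, 3, 5]);
// SAFETY: every index is in bounds for `table`, so each
// base + indices[i] satisfies the read_unaligned requirements.
let gathered = unsafe { I32x4::gather(table.as_ptr(), indices) };
assert_eq!(gathered, I32x4::from([17, 10, 13, 15]));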
unsafe fn gather_masked(base: *const i32, indices: I32x4, mask: I32x4, src: I32x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERDD xmm, vm32x, xmm
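A brief sketch (assuming I32x4 and the SimdBaseGatherable trait are in scope): lanes whose mask has the sign bit set are loaded from memory, the rest are taken from src.
let table: [i32; 4] = [100, 200, 300, 400];
let indices = I32x4::from([0, 1, 2, 3]);
let mask = I32x4::from([-1, 0, -1, 0]);
let src = I32x4::from([7, 8, 9, 10]);
// SAFETY: every selected index is in bounds for `table`.
let out = unsafe { I32x4::gather_masked(table.as_ptr(), indices, mask, src) };
assert_eq!(out, I32x4::from([100, 8, 300, 10]));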
impl SimdBaseGatherable<I32x4> for I64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: I32x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERDQ ymm, vm32x, ymm
unsafe fn gather_masked(base: *const i64, indices: I32x4, mask: I64x4, src: I64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERDQ ymm, vm32x, ymm
impl SimdBaseGatherable<I32x4> for U32x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u32, indices: I32x4) -> U32x4
§Scalar Equivalent:
U32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERDD xmm, vm32x, xmm
unsafe fn gather_masked(base: *const u32, indices: I32x4, mask: U32x4, src: U32x4) -> U32x4
§Scalar Equivalent:
U32x4::from([
if (mask.as_array()[0] >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERDD xmm, vm32x, xmm
impl SimdBaseGatherable<I32x4> for U64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I32x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERDQ ymm, vm32x, ymm
unsafe fn gather_masked(base: *const u64, indices: I32x4, mask: U64x4, src: U64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
if (mask.as_array()[0] >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERDQ ymm, vm32x, ymm
impl SimdBaseGatherable<I64x4> for I32x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: I64x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(base: *const i32, indices: I64x4, mask: I32x4, src: I32x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<U64x4> for I32x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: U64x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(base: *const i32, indices: U64x4, mask: I32x4, src: I32x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
impl Sub for I32x4
fn sub(self, rhs: I32x4) -> I32x4
Perform a pairwise wrapping_sub
§Scalar Equivalent
I32x4::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
])
§AVX2 Intrinsics Used
_mm_sub_epi32 (PSUBD xmm, xmm)
§Neon Intrinsics Used
vsubq_s32
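A brief usage sketch (assuming I32x4 is in scope; subtraction wraps per lane):
let a = I32x4::from([10, 0, i32::MIN, 5]);
let b = I32x4::from([3, 7, 1, 5]);
// Each lane is subtracted independently with wrapping semantics.
assert_eq!(a - b, I32x4::from([7, -7, i32::MAX, 0]));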
impl SubAssign for I32x4
fn sub_assign(&mut self, other: I32x4)
Performs the -= operation.
impl Copy for I32x4
impl Eq for I32x4
impl Pod for I32x4
Auto Trait Implementations§
impl Freeze for I32x4
impl RefUnwindSafe for I32x4
impl Send for I32x4
impl Sync for I32x4
impl Unpin for I32x4
impl UnwindSafe for I32x4
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits except for
the possible invalid bit patterns being checked during
is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits
as &Self.