pub struct I8x32(/* private fields */);Expand description
[i8; 32] as a vector.
Implementations§
Source§impl I8x32
impl I8x32
Sourcepub const fn from_array(arr: [i8; 32]) -> Self
pub const fn from_array(arr: [i8; 32]) -> Self
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
§Example
const MY_EXTREMELY_FUN_VALUE: I8x32 = I8x32::from_array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31,
]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as i8, value);
}Source§impl I8x32
impl I8x32
Sourcepub fn shuffle(&self, order: U8x32) -> I8x32
pub fn shuffle(&self, order: U8x32) -> I8x32
§Scalar Equivalent:
let mut arr = [0; 32];
for (lane_dst, (lane_src, order)) in
arr.chunks_exact_mut(16).zip(
self.as_array().chunks_exact(16)
.zip(order.as_array().chunks_exact(16))
)
{
for (dst, idx) in lane_dst.iter_mut().zip(order) {
let idx = *idx;
*dst = if (idx >> 7) == 1 {
0
} else {
lane_src[(idx as usize) % 16]
};
}
}
arr.into()§Avx2
-
VPSHUFB ymm, ymm, ymm
Trait Implementations§
Source§impl Add for I8x32
impl Add for I8x32
Source§fn add(self, rhs: I8x32) -> I8x32
fn add(self, rhs: I8x32) -> I8x32
Perform a pairwise wrapping_add
§Scalar Equivalent
ⓘ
I8x32::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
self.as_array()[8].wrapping_add(rhs.as_array()[8]),
self.as_array()[9].wrapping_add(rhs.as_array()[9]),
self.as_array()[10].wrapping_add(rhs.as_array()[10]),
self.as_array()[11].wrapping_add(rhs.as_array()[11]),
self.as_array()[12].wrapping_add(rhs.as_array()[12]),
self.as_array()[13].wrapping_add(rhs.as_array()[13]),
self.as_array()[14].wrapping_add(rhs.as_array()[14]),
self.as_array()[15].wrapping_add(rhs.as_array()[15]),
self.as_array()[16].wrapping_add(rhs.as_array()[16]),
self.as_array()[17].wrapping_add(rhs.as_array()[17]),
self.as_array()[18].wrapping_add(rhs.as_array()[18]),
self.as_array()[19].wrapping_add(rhs.as_array()[19]),
self.as_array()[20].wrapping_add(rhs.as_array()[20]),
self.as_array()[21].wrapping_add(rhs.as_array()[21]),
self.as_array()[22].wrapping_add(rhs.as_array()[22]),
self.as_array()[23].wrapping_add(rhs.as_array()[23]),
self.as_array()[24].wrapping_add(rhs.as_array()[24]),
self.as_array()[25].wrapping_add(rhs.as_array()[25]),
self.as_array()[26].wrapping_add(rhs.as_array()[26]),
self.as_array()[27].wrapping_add(rhs.as_array()[27]),
self.as_array()[28].wrapping_add(rhs.as_array()[28]),
self.as_array()[29].wrapping_add(rhs.as_array()[29]),
self.as_array()[30].wrapping_add(rhs.as_array()[30]),
self.as_array()[31].wrapping_add(rhs.as_array()[31]),
])§AVX2 Intrinsics Used
_mm256_add_epi8VPADDB ymm, ymm, ymm
§Neon Intrinsics Used
vaddq_s8- This intrinsic compiles to the following instructions:
Source§impl AddAssign for I8x32
impl AddAssign for I8x32
Source§fn add_assign(&mut self, other: I8x32)
fn add_assign(&mut self, other: I8x32)
Performs the
+= operation. Read moreSource§impl BitAnd for I8x32
impl BitAnd for I8x32
Source§fn bitand(self, rhs: I8x32) -> I8x32
fn bitand(self, rhs: I8x32) -> I8x32
Perform a pairwise bitwise and
§Scalar Equivalent
ⓘ
I8x32::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
self.as_array()[8] & rhs.as_array()[8],
self.as_array()[9] & rhs.as_array()[9],
self.as_array()[10] & rhs.as_array()[10],
self.as_array()[11] & rhs.as_array()[11],
self.as_array()[12] & rhs.as_array()[12],
self.as_array()[13] & rhs.as_array()[13],
self.as_array()[14] & rhs.as_array()[14],
self.as_array()[15] & rhs.as_array()[15],
self.as_array()[16] & rhs.as_array()[16],
self.as_array()[17] & rhs.as_array()[17],
self.as_array()[18] & rhs.as_array()[18],
self.as_array()[19] & rhs.as_array()[19],
self.as_array()[20] & rhs.as_array()[20],
self.as_array()[21] & rhs.as_array()[21],
self.as_array()[22] & rhs.as_array()[22],
self.as_array()[23] & rhs.as_array()[23],
self.as_array()[24] & rhs.as_array()[24],
self.as_array()[25] & rhs.as_array()[25],
self.as_array()[26] & rhs.as_array()[26],
self.as_array()[27] & rhs.as_array()[27],
self.as_array()[28] & rhs.as_array()[28],
self.as_array()[29] & rhs.as_array()[29],
self.as_array()[30] & rhs.as_array()[30],
self.as_array()[31] & rhs.as_array()[31],
])§AVX2 Intrinsics Used
_mm256_and_si256VPAND ymm, ymm, ymm
§Neon Intrinsics Used
vandq_s8- This intrinsic compiles to the following instructions:
Source§impl BitAndAssign for I8x32
impl BitAndAssign for I8x32
Source§fn bitand_assign(&mut self, other: I8x32)
fn bitand_assign(&mut self, other: I8x32)
Performs the
&= operation. Read moreSource§impl BitOr for I8x32
impl BitOr for I8x32
Source§fn bitor(self, rhs: I8x32) -> I8x32
fn bitor(self, rhs: I8x32) -> I8x32
Perform a pairwise bitwise or
§Scalar Equivalent
ⓘ
I8x32::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
self.as_array()[8] | rhs.as_array()[8],
self.as_array()[9] | rhs.as_array()[9],
self.as_array()[10] | rhs.as_array()[10],
self.as_array()[11] | rhs.as_array()[11],
self.as_array()[12] | rhs.as_array()[12],
self.as_array()[13] | rhs.as_array()[13],
self.as_array()[14] | rhs.as_array()[14],
self.as_array()[15] | rhs.as_array()[15],
self.as_array()[16] | rhs.as_array()[16],
self.as_array()[17] | rhs.as_array()[17],
self.as_array()[18] | rhs.as_array()[18],
self.as_array()[19] | rhs.as_array()[19],
self.as_array()[20] | rhs.as_array()[20],
self.as_array()[21] | rhs.as_array()[21],
self.as_array()[22] | rhs.as_array()[22],
self.as_array()[23] | rhs.as_array()[23],
self.as_array()[24] | rhs.as_array()[24],
self.as_array()[25] | rhs.as_array()[25],
self.as_array()[26] | rhs.as_array()[26],
self.as_array()[27] | rhs.as_array()[27],
self.as_array()[28] | rhs.as_array()[28],
self.as_array()[29] | rhs.as_array()[29],
self.as_array()[30] | rhs.as_array()[30],
self.as_array()[31] | rhs.as_array()[31],
])§AVX2 Intrinsics Used
_mm256_or_si256VPOR ymm, ymm, ymm
§Neon Intrinsics Used
vorrq_s8- This intrinsic compiles to the following instructions:
Source§impl BitOrAssign for I8x32
impl BitOrAssign for I8x32
Source§fn bitor_assign(&mut self, other: I8x32)
fn bitor_assign(&mut self, other: I8x32)
Performs the
|= operation. Read moreSource§impl BitXor for I8x32
impl BitXor for I8x32
Source§fn bitxor(self, rhs: I8x32) -> I8x32
fn bitxor(self, rhs: I8x32) -> I8x32
Perform a pairwise bitwise xor
§Scalar Equivalent
ⓘ
I8x32::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
self.as_array()[8] ^ rhs.as_array()[8],
self.as_array()[9] ^ rhs.as_array()[9],
self.as_array()[10] ^ rhs.as_array()[10],
self.as_array()[11] ^ rhs.as_array()[11],
self.as_array()[12] ^ rhs.as_array()[12],
self.as_array()[13] ^ rhs.as_array()[13],
self.as_array()[14] ^ rhs.as_array()[14],
self.as_array()[15] ^ rhs.as_array()[15],
self.as_array()[16] ^ rhs.as_array()[16],
self.as_array()[17] ^ rhs.as_array()[17],
self.as_array()[18] ^ rhs.as_array()[18],
self.as_array()[19] ^ rhs.as_array()[19],
self.as_array()[20] ^ rhs.as_array()[20],
self.as_array()[21] ^ rhs.as_array()[21],
self.as_array()[22] ^ rhs.as_array()[22],
self.as_array()[23] ^ rhs.as_array()[23],
self.as_array()[24] ^ rhs.as_array()[24],
self.as_array()[25] ^ rhs.as_array()[25],
self.as_array()[26] ^ rhs.as_array()[26],
self.as_array()[27] ^ rhs.as_array()[27],
self.as_array()[28] ^ rhs.as_array()[28],
self.as_array()[29] ^ rhs.as_array()[29],
self.as_array()[30] ^ rhs.as_array()[30],
self.as_array()[31] ^ rhs.as_array()[31],
])§AVX2 Intrinsics Used
_mm256_xor_si256VPXOR ymm, ymm, ymm
§Neon Intrinsics Used
veorq_s8- This intrinsic compiles to the following instructions:
Source§impl BitXorAssign for I8x32
impl BitXorAssign for I8x32
Source§fn bitxor_assign(&mut self, other: I8x32)
fn bitxor_assign(&mut self, other: I8x32)
Performs the
^= operation. Read moreSource§impl ConditionallySelectable for I8x32
impl ConditionallySelectable for I8x32
Source§fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
Source§fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_assign(&mut self, other: &Self, choice: Choice)
Source§fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap
self and other if choice == 1; otherwise,
reassign both unto themselves. Read moreSource§impl ConstantTimeEq for I8x32
impl ConstantTimeEq for I8x32
Source§impl<'de> Deserialize<'de> for I8x32
impl<'de> Deserialize<'de> for I8x32
Source§fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>where
D: Deserializer<'de>,
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>where
D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer. Read more
Source§impl Distribution<I8x32> for Standard
impl Distribution<I8x32> for Standard
Source§impl From<I8x16> for I8x32
impl From<I8x16> for I8x32
Source§fn from(vector: I8x16) -> I8x32
fn from(vector: I8x16) -> I8x32
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
§Scalar Equivalent:
let mut out = [0; 32];
out[0..16].copy_from_slice(&vector.as_array());
I8x32::from(out)§Avx2
Source§impl Shl<u64> for I8x32
impl Shl<u64> for I8x32
Source§fn shl(self, amount: u64) -> I8x32
fn shl(self, amount: u64) -> I8x32
§Scalar Equivalent:
if amount >= 8 {
I8x32::ZERO
} else {
I8x32::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
self.as_array()[8] << amount,
self.as_array()[9] << amount,
self.as_array()[10] << amount,
self.as_array()[11] << amount,
self.as_array()[12] << amount,
self.as_array()[13] << amount,
self.as_array()[14] << amount,
self.as_array()[15] << amount,
self.as_array()[16] << amount,
self.as_array()[17] << amount,
self.as_array()[18] << amount,
self.as_array()[19] << amount,
self.as_array()[20] << amount,
self.as_array()[21] << amount,
self.as_array()[22] << amount,
self.as_array()[23] << amount,
self.as_array()[24] << amount,
self.as_array()[25] << amount,
self.as_array()[26] << amount,
self.as_array()[27] << amount,
self.as_array()[28] << amount,
self.as_array()[29] << amount,
self.as_array()[30] << amount,
self.as_array()[31] << amount,
])
}§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
Source§impl ShlAssign<u64> for I8x32
impl ShlAssign<u64> for I8x32
Source§fn shl_assign(&mut self, amount: u64)
fn shl_assign(&mut self, amount: u64)
Performs the
<<= operation. Read moreSource§impl ShlAssign for I8x32
impl ShlAssign for I8x32
Source§fn shl_assign(&mut self, amount: I8x32)
fn shl_assign(&mut self, amount: I8x32)
Performs the
<<= operation. Read moreSource§impl Shr<u64> for I8x32
impl Shr<u64> for I8x32
Source§fn shr(self, amount: u64) -> I8x32
fn shr(self, amount: u64) -> I8x32
§Scalar Equivalent:
if amount >= 8 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I8x32::from(out)
} else {
I8x32::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
self.as_array()[8] >> amount,
self.as_array()[9] >> amount,
self.as_array()[10] >> amount,
self.as_array()[11] >> amount,
self.as_array()[12] >> amount,
self.as_array()[13] >> amount,
self.as_array()[14] >> amount,
self.as_array()[15] >> amount,
self.as_array()[16] >> amount,
self.as_array()[17] >> amount,
self.as_array()[18] >> amount,
self.as_array()[19] >> amount,
self.as_array()[20] >> amount,
self.as_array()[21] >> amount,
self.as_array()[22] >> amount,
self.as_array()[23] >> amount,
self.as_array()[24] >> amount,
self.as_array()[25] >> amount,
self.as_array()[26] >> amount,
self.as_array()[27] >> amount,
self.as_array()[28] >> amount,
self.as_array()[29] >> amount,
self.as_array()[30] >> amount,
self.as_array()[31] >> amount,
])
}§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
Source§impl Shr for I8x32
impl Shr for I8x32
Source§fn shr(self, amount: I8x32) -> I8x32
fn shr(self, amount: I8x32) -> I8x32
§Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if (0..8).contains(&amm) {
*x >> amm
} else if *x < 0 {
-1
} else {
0
};
}
I8x32::from(out)§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
Source§impl ShrAssign<u64> for I8x32
impl ShrAssign<u64> for I8x32
Source§fn shr_assign(&mut self, amount: u64)
fn shr_assign(&mut self, amount: u64)
Performs the
>>= operation. Read moreSource§impl ShrAssign for I8x32
impl ShrAssign for I8x32
Source§fn shr_assign(&mut self, amount: I8x32)
fn shr_assign(&mut self, amount: I8x32)
Performs the
>>= operation. Read moreSource§impl SimdBase for I8x32
impl SimdBase for I8x32
Source§fn is_zero(&self) -> bool
fn is_zero(&self) -> bool
Source§fn set_lo(scalar: i8) -> I8x32
fn set_lo(scalar: i8) -> I8x32
Source§fn broadcast_lo(vector: I8x16) -> I8x32
fn broadcast_lo(vector: I8x16) -> I8x32
Source§fn cmp_eq(&self, other: I8x32) -> I8x32
fn cmp_eq(&self, other: I8x32) -> I8x32
§Scalar Equivalent:
I8x32::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] == other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] == other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] == other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] == other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] == other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] == other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] == other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] == other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] == other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] == other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] == other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] == other.as_array()[15] { -1 } else { 0 },
if self.as_array()[16] == other.as_array()[16] { -1 } else { 0 },
if self.as_array()[17] == other.as_array()[17] { -1 } else { 0 },
if self.as_array()[18] == other.as_array()[18] { -1 } else { 0 },
if self.as_array()[19] == other.as_array()[19] { -1 } else { 0 },
if self.as_array()[20] == other.as_array()[20] { -1 } else { 0 },
if self.as_array()[21] == other.as_array()[21] { -1 } else { 0 },
if self.as_array()[22] == other.as_array()[22] { -1 } else { 0 },
if self.as_array()[23] == other.as_array()[23] { -1 } else { 0 },
if self.as_array()[24] == other.as_array()[24] { -1 } else { 0 },
if self.as_array()[25] == other.as_array()[25] { -1 } else { 0 },
if self.as_array()[26] == other.as_array()[26] { -1 } else { 0 },
if self.as_array()[27] == other.as_array()[27] { -1 } else { 0 },
if self.as_array()[28] == other.as_array()[28] { -1 } else { 0 },
if self.as_array()[29] == other.as_array()[29] { -1 } else { 0 },
if self.as_array()[30] == other.as_array()[30] { -1 } else { 0 },
if self.as_array()[31] == other.as_array()[31] { -1 } else { 0 },
])§Avx2
-
VPCMPEQB ymm, ymm, ymm
Source§fn and_not(&self, other: I8x32) -> I8x32
fn and_not(&self, other: I8x32) -> I8x32
§Scalar Equivalent:
I8x32::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
self.as_array()[8] & (!other.as_array()[8]),
self.as_array()[9] & (!other.as_array()[9]),
self.as_array()[10] & (!other.as_array()[10]),
self.as_array()[11] & (!other.as_array()[11]),
self.as_array()[12] & (!other.as_array()[12]),
self.as_array()[13] & (!other.as_array()[13]),
self.as_array()[14] & (!other.as_array()[14]),
self.as_array()[15] & (!other.as_array()[15]),
self.as_array()[16] & (!other.as_array()[16]),
self.as_array()[17] & (!other.as_array()[17]),
self.as_array()[18] & (!other.as_array()[18]),
self.as_array()[19] & (!other.as_array()[19]),
self.as_array()[20] & (!other.as_array()[20]),
self.as_array()[21] & (!other.as_array()[21]),
self.as_array()[22] & (!other.as_array()[22]),
self.as_array()[23] & (!other.as_array()[23]),
self.as_array()[24] & (!other.as_array()[24]),
self.as_array()[25] & (!other.as_array()[25]),
self.as_array()[26] & (!other.as_array()[26]),
self.as_array()[27] & (!other.as_array()[27]),
self.as_array()[28] & (!other.as_array()[28]),
self.as_array()[29] & (!other.as_array()[29]),
self.as_array()[30] & (!other.as_array()[30]),
self.as_array()[31] & (!other.as_array()[31]),
])§Avx2
-
VPANDN ymm, ymm, ymm
Source§fn cmp_gt(&self, other: I8x32) -> I8x32
fn cmp_gt(&self, other: I8x32) -> I8x32
§Scalar Equivalent:
I8x32::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] > other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] > other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] > other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] > other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] > other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] > other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] > other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] > other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] > other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] > other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] > other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] > other.as_array()[15] { -1 } else { 0 },
if self.as_array()[16] > other.as_array()[16] { -1 } else { 0 },
if self.as_array()[17] > other.as_array()[17] { -1 } else { 0 },
if self.as_array()[18] > other.as_array()[18] { -1 } else { 0 },
if self.as_array()[19] > other.as_array()[19] { -1 } else { 0 },
if self.as_array()[20] > other.as_array()[20] { -1 } else { 0 },
if self.as_array()[21] > other.as_array()[21] { -1 } else { 0 },
if self.as_array()[22] > other.as_array()[22] { -1 } else { 0 },
if self.as_array()[23] > other.as_array()[23] { -1 } else { 0 },
if self.as_array()[24] > other.as_array()[24] { -1 } else { 0 },
if self.as_array()[25] > other.as_array()[25] { -1 } else { 0 },
if self.as_array()[26] > other.as_array()[26] { -1 } else { 0 },
if self.as_array()[27] > other.as_array()[27] { -1 } else { 0 },
if self.as_array()[28] > other.as_array()[28] { -1 } else { 0 },
if self.as_array()[29] > other.as_array()[29] { -1 } else { 0 },
if self.as_array()[30] > other.as_array()[30] { -1 } else { 0 },
if self.as_array()[31] > other.as_array()[31] { -1 } else { 0 },
])§Avx2
-
VPCMPGTB ymm, ymm, ymm
Source§fn shift_left<const BITS: usize>(&self) -> I8x32
fn shift_left<const BITS: usize>(&self) -> I8x32
Source§fn shift_right<const BITS: usize>(&self) -> I8x32
fn shift_right<const BITS: usize>(&self) -> I8x32
Source§fn unpack_lo(&self, other: I8x32) -> I8x32
fn unpack_lo(&self, other: I8x32) -> I8x32
§Scalar Equivalent:
I8x32::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
self.as_array()[4],
other.as_array()[4],
self.as_array()[5],
other.as_array()[5],
self.as_array()[6],
other.as_array()[6],
self.as_array()[7],
other.as_array()[7],
// Lane# 1
self.as_array()[16],
other.as_array()[16],
self.as_array()[17],
other.as_array()[17],
self.as_array()[18],
other.as_array()[18],
self.as_array()[19],
other.as_array()[19],
self.as_array()[20],
other.as_array()[20],
self.as_array()[21],
other.as_array()[21],
self.as_array()[22],
other.as_array()[22],
self.as_array()[23],
other.as_array()[23],
])§Avx2
-
VPUNPCKLBW ymm, ymm, ymm
Source§fn unpack_hi(&self, other: I8x32) -> I8x32
fn unpack_hi(&self, other: I8x32) -> I8x32
§Scalar Equivalent:
I8x32::from([
// Lane# 0
self.as_array()[8],
other.as_array()[8],
self.as_array()[9],
other.as_array()[9],
self.as_array()[10],
other.as_array()[10],
self.as_array()[11],
other.as_array()[11],
self.as_array()[12],
other.as_array()[12],
self.as_array()[13],
other.as_array()[13],
self.as_array()[14],
other.as_array()[14],
self.as_array()[15],
other.as_array()[15],
// Lane# 1
self.as_array()[24],
other.as_array()[24],
self.as_array()[25],
other.as_array()[25],
self.as_array()[26],
other.as_array()[26],
self.as_array()[27],
other.as_array()[27],
self.as_array()[28],
other.as_array()[28],
self.as_array()[29],
other.as_array()[29],
self.as_array()[30],
other.as_array()[30],
self.as_array()[31],
other.as_array()[31],
])§Avx2
-
VPUNPCKHBW ymm, ymm, ymm
Source§fn max(&self, other: I8x32) -> I8x32
fn max(&self, other: I8x32) -> I8x32
§Scalar Equivalent:
I8x32::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
self.as_array()[8].max(other.as_array()[8]),
self.as_array()[9].max(other.as_array()[9]),
self.as_array()[10].max(other.as_array()[10]),
self.as_array()[11].max(other.as_array()[11]),
self.as_array()[12].max(other.as_array()[12]),
self.as_array()[13].max(other.as_array()[13]),
self.as_array()[14].max(other.as_array()[14]),
self.as_array()[15].max(other.as_array()[15]),
self.as_array()[16].max(other.as_array()[16]),
self.as_array()[17].max(other.as_array()[17]),
self.as_array()[18].max(other.as_array()[18]),
self.as_array()[19].max(other.as_array()[19]),
self.as_array()[20].max(other.as_array()[20]),
self.as_array()[21].max(other.as_array()[21]),
self.as_array()[22].max(other.as_array()[22]),
self.as_array()[23].max(other.as_array()[23]),
self.as_array()[24].max(other.as_array()[24]),
self.as_array()[25].max(other.as_array()[25]),
self.as_array()[26].max(other.as_array()[26]),
self.as_array()[27].max(other.as_array()[27]),
self.as_array()[28].max(other.as_array()[28]),
self.as_array()[29].max(other.as_array()[29]),
self.as_array()[30].max(other.as_array()[30]),
self.as_array()[31].max(other.as_array()[31]),
])§Avx2
-
VPMAXSB ymm, ymm, ymm
Source§fn min(&self, other: I8x32) -> I8x32
fn min(&self, other: I8x32) -> I8x32
§Scalar Equivalent:
I8x32::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
self.as_array()[8].min(other.as_array()[8]),
self.as_array()[9].min(other.as_array()[9]),
self.as_array()[10].min(other.as_array()[10]),
self.as_array()[11].min(other.as_array()[11]),
self.as_array()[12].min(other.as_array()[12]),
self.as_array()[13].min(other.as_array()[13]),
self.as_array()[14].min(other.as_array()[14]),
self.as_array()[15].min(other.as_array()[15]),
self.as_array()[16].min(other.as_array()[16]),
self.as_array()[17].min(other.as_array()[17]),
self.as_array()[18].min(other.as_array()[18]),
self.as_array()[19].min(other.as_array()[19]),
self.as_array()[20].min(other.as_array()[20]),
self.as_array()[21].min(other.as_array()[21]),
self.as_array()[22].min(other.as_array()[22]),
self.as_array()[23].min(other.as_array()[23]),
self.as_array()[24].min(other.as_array()[24]),
self.as_array()[25].min(other.as_array()[25]),
self.as_array()[26].min(other.as_array()[26]),
self.as_array()[27].min(other.as_array()[27]),
self.as_array()[28].min(other.as_array()[28]),
self.as_array()[29].min(other.as_array()[29]),
self.as_array()[30].min(other.as_array()[30]),
self.as_array()[31].min(other.as_array()[31]),
])§Avx2
-
VPMINSB ymm, ymm, ymm
Source§type BroadcastLoInput = I8x16
type BroadcastLoInput = I8x16
A vector of
[Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())]Source§impl SimdBase8 for I8x32
impl SimdBase8 for I8x32
Source§fn shift_bytes_left<const AMOUNT: usize>(&self) -> I8x32
fn shift_bytes_left<const AMOUNT: usize>(&self) -> I8x32
Source§fn shift_bytes_right<const AMOUNT: usize>(&self) -> I8x32
fn shift_bytes_right<const AMOUNT: usize>(&self) -> I8x32
Source§impl SimdSaturatingArithmetic for I8x32
impl SimdSaturatingArithmetic for I8x32
Source§fn saturating_add(&self, other: I8x32) -> I8x32
fn saturating_add(&self, other: I8x32) -> I8x32
Source§impl Sub for I8x32
impl Sub for I8x32
Source§fn sub(self, rhs: I8x32) -> I8x32
fn sub(self, rhs: I8x32) -> I8x32
Perform a pairwise wrapping_sub
§Scalar Equivalent
ⓘ
I8x32::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
self.as_array()[8].wrapping_sub(rhs.as_array()[8]),
self.as_array()[9].wrapping_sub(rhs.as_array()[9]),
self.as_array()[10].wrapping_sub(rhs.as_array()[10]),
self.as_array()[11].wrapping_sub(rhs.as_array()[11]),
self.as_array()[12].wrapping_sub(rhs.as_array()[12]),
self.as_array()[13].wrapping_sub(rhs.as_array()[13]),
self.as_array()[14].wrapping_sub(rhs.as_array()[14]),
self.as_array()[15].wrapping_sub(rhs.as_array()[15]),
self.as_array()[16].wrapping_sub(rhs.as_array()[16]),
self.as_array()[17].wrapping_sub(rhs.as_array()[17]),
self.as_array()[18].wrapping_sub(rhs.as_array()[18]),
self.as_array()[19].wrapping_sub(rhs.as_array()[19]),
self.as_array()[20].wrapping_sub(rhs.as_array()[20]),
self.as_array()[21].wrapping_sub(rhs.as_array()[21]),
self.as_array()[22].wrapping_sub(rhs.as_array()[22]),
self.as_array()[23].wrapping_sub(rhs.as_array()[23]),
self.as_array()[24].wrapping_sub(rhs.as_array()[24]),
self.as_array()[25].wrapping_sub(rhs.as_array()[25]),
self.as_array()[26].wrapping_sub(rhs.as_array()[26]),
self.as_array()[27].wrapping_sub(rhs.as_array()[27]),
self.as_array()[28].wrapping_sub(rhs.as_array()[28]),
self.as_array()[29].wrapping_sub(rhs.as_array()[29]),
self.as_array()[30].wrapping_sub(rhs.as_array()[30]),
self.as_array()[31].wrapping_sub(rhs.as_array()[31]),
])§AVX2 Intrinsics Used
_mm256_sub_epi8VPSUBB ymm, ymm, ymm
§Neon Intrinsics Used
vsubq_s8- This intrinsic compiles to the following instructions:
Source§impl SubAssign for I8x32
impl SubAssign for I8x32
Source§fn sub_assign(&mut self, other: I8x32)
fn sub_assign(&mut self, other: I8x32)
Performs the
-= operation. Read moreimpl Copy for I8x32
impl Eq for I8x32
impl Pod for I8x32
Auto Trait Implementations§
impl Freeze for I8x32
impl RefUnwindSafe for I8x32
impl Send for I8x32
impl Sync for I8x32
impl Unpin for I8x32
impl UnwindSafe for I8x32
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
§impl<T> CheckedBitPattern for Twhere
T: AnyBitPattern,
impl<T> CheckedBitPattern for Twhere
T: AnyBitPattern,
§type Bits = T
type Bits = T
Self must have the same layout as the specified Bits except for
the possible invalid bit patterns being checked during
is_valid_bit_pattern.§fn is_valid_bit_pattern(_bits: &T) -> bool
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret
bits
as &Self.