pub struct U8x32(/* private fields */);
[u8; 32] as a vector.
Implementations
impl U8x32
pub const fn from_array(arr: [u8; 32]) -> Self
Create a vector from an array.
Unlike the From trait method, from_array is const.
Example
const MY_EXTREMELY_FUN_VALUE: U8x32 = U8x32::from_array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31,
]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as u8, value);
}
impl U8x32
pub fn shuffle(&self, order: U8x32) -> U8x32
Scalar Equivalent:
let mut arr = [0; 32];
for (lane_dst, (lane_src, order)) in
arr.chunks_exact_mut(16).zip(
self.as_array().chunks_exact(16)
.zip(order.as_array().chunks_exact(16))
)
{
for (dst, idx) in lane_dst.iter_mut().zip(order) {
let idx = *idx;
*dst = if (idx >> 7) == 1 {
0
} else {
lane_src[(idx as usize) % 16]
};
}
}
arr.into()
Avx2
- VPSHUFB ymm, ymm, ymm
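Example
A hedged usage sketch: reverse the bytes within each 16-byte lane. Indices select only within their own lane, and an index with the high bit set zeroes the destination byte.
let data = U8x32::from_array([
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
    21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
]);
// The same reversal pattern is applied independently to each lane.
let order = U8x32::from_array([
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
]);
let reversed = data.shuffle(order);
assert_eq!(reversed.as_array()[0], 15); // lane 0 reversed: 15, 14, ..., 0
assert_eq!(reversed.as_array()[16], 31); // lane 1 reversed: 31, 30, ..., 16
// An index with bit 7 set always produces zero.
assert!(data.shuffle(U8x32::from_array([0x80; 32])).is_zero());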
Trait Implementations
impl Add for U8x32
fn add(self, rhs: U8x32) -> U8x32
Perform a pairwise wrapping_add.
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| {
    self.as_array()[i].wrapping_add(rhs.as_array()[i])
});
U8x32::from(arr)
AVX2 Intrinsics Used
_mm256_add_epi8 (VPADDB ymm, ymm, ymm)
Neon Intrinsics Used
vaddq_u8 (compiles to ADD Vd.16B, Vn.16B, Vm.16B)
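Example
A hedged sketch (broadcast is assumed from SimdBase): overflow wraps modulo 256 instead of panicking.
let a = U8x32::broadcast(250);
let b = U8x32::broadcast(10);
// Each byte computes 250u8.wrapping_add(10) == 4.
assert!((a + b).as_array().iter().all(|&x| x == 4));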
impl AddAssign for U8x32
fn add_assign(&mut self, other: U8x32)
Performs the += operation.
impl BitAnd for U8x32
fn bitand(self, rhs: U8x32) -> U8x32
Perform a pairwise bitwise and.
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| self.as_array()[i] & rhs.as_array()[i]);
U8x32::from(arr)
AVX2 Intrinsics Used
_mm256_and_si256 (VPAND ymm, ymm, ymm)
Neon Intrinsics Used
vandq_u8 (compiles to AND Vd.16B, Vn.16B, Vm.16B)
impl BitAndAssign for U8x32
fn bitand_assign(&mut self, other: U8x32)
Performs the &= operation.
impl BitOr for U8x32
fn bitor(self, rhs: U8x32) -> U8x32
Perform a pairwise bitwise or.
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| self.as_array()[i] | rhs.as_array()[i]);
U8x32::from(arr)
AVX2 Intrinsics Used
_mm256_or_si256 (VPOR ymm, ymm, ymm)
Neon Intrinsics Used
vorrq_u8 (compiles to ORR Vd.16B, Vn.16B, Vm.16B)
impl BitOrAssign for U8x32
fn bitor_assign(&mut self, other: U8x32)
Performs the |= operation.
impl BitXor for U8x32
fn bitxor(self, rhs: U8x32) -> U8x32
Perform a pairwise bitwise xor.
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| self.as_array()[i] ^ rhs.as_array()[i]);
U8x32::from(arr)
AVX2 Intrinsics Used
_mm256_xor_si256 (VPXOR ymm, ymm, ymm)
Neon Intrinsics Used
veorq_u8 (compiles to EOR Vd.16B, Vn.16B, Vm.16B)
impl BitXorAssign for U8x32
fn bitxor_assign(&mut self, other: U8x32)
Performs the ^= operation.
impl ConditionallySelectable for U8x32
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap a and b if choice == 1; otherwise, leave both unchanged.
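Example
A hedged sketch (Choice comes from the subtle crate; broadcast is assumed from SimdBase): select between two vectors without a data-dependent branch.
use subtle::{Choice, ConditionallySelectable};

let a = U8x32::broadcast(1);
let b = U8x32::broadcast(2);
// choice == 0 yields a; choice == 1 yields b.
assert!(U8x32::conditional_select(&a, &b, Choice::from(0)) == a);
assert!(U8x32::conditional_select(&a, &b, Choice::from(1)) == b);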
impl ConstantTimeEq for U8x32
impl<'de> Deserialize<'de> for U8x32
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>
Deserialize this value from the given Serde deserializer.
impl Distribution<U8x32> for Standard
impl From<U8x16> for U8x32
fn from(vector: U8x16) -> U8x32
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 32];
out[0..16].copy_from_slice(&vector.as_array());
U8x32::from(out)
Avx2
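Example
A hedged sketch (it assumes U8x16: From<[u8; 16]>, mirroring the U8x32 constructors): the low half is copied and the high half is zeroed.
let narrow = U8x16::from([7u8; 16]);
let wide = U8x32::from(narrow);
assert!(wide.as_array()[..16].iter().all(|&b| b == 7));
assert!(wide.as_array()[16..].iter().all(|&b| b == 0));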
impl Shl<u64> for U8x32
fn shl(self, amount: u64) -> U8x32
Scalar Equivalent:
if amount >= 8 {
    U8x32::ZERO
} else {
    let arr: [u8; 32] = std::array::from_fn(|i| self.as_array()[i] << amount);
    U8x32::from(arr)
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
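Example
A hedged sketch (broadcast and is_zero are assumed from SimdBase): every byte shifts independently, and shifting by 8 or more clears the vector.
let v = U8x32::broadcast(0b0000_0011);
assert!((v << 2).as_array().iter().all(|&x| x == 0b0000_1100));
assert!((v << 8).is_zero());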
impl ShlAssign<u64> for U8x32
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl ShlAssign for U8x32
fn shl_assign(&mut self, amount: U8x32)
Performs the <<= operation.
impl Shr<u64> for U8x32
fn shr(self, amount: u64) -> U8x32
Scalar Equivalent:
if amount >= 8 {
    U8x32::ZERO
} else {
    let arr: [u8; 32] = std::array::from_fn(|i| self.as_array()[i] >> amount);
    U8x32::from(arr)
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl ShrAssign<u64> for U8x32
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl ShrAssign for U8x32
fn shr_assign(&mut self, amount: U8x32)
Performs the >>= operation.
impl SimdBase for U8x32
fn is_zero(&self) -> bool
fn set_lo(scalar: u8) -> U8x32
fn broadcast_lo(vector: U8x16) -> U8x32
fn cmp_eq(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| {
    if self.as_array()[i] == other.as_array()[i] { u8::MAX } else { 0 }
});
U8x32::from(arr)
Avx2
- VPCMPEQB ymm, ymm, ymm
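Example
A hedged sketch (broadcast is assumed from SimdBase): the all-ones/all-zeros output is usable directly as a byte mask.
let a = U8x32::from(std::array::from_fn::<u8, 32, _>(|i| i as u8));
let mask = a.cmp_eq(U8x32::broadcast(3));
// Only index 3 compares equal, so only that byte is u8::MAX.
for (i, &m) in mask.as_array().iter().enumerate() {
    assert_eq!(m, if i == 3 { u8::MAX } else { 0 });
}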
fn and_not(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| self.as_array()[i] & !other.as_array()[i]);
U8x32::from(arr)
Avx2
- VPANDN ymm, ymm, ymm
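Example
A hedged sketch (broadcast is assumed from SimdBase): and_not clears exactly the bits that are set in other.
let a = U8x32::broadcast(0b1111_0000);
let b = U8x32::broadcast(0b1010_1010);
// 0b1111_0000 & !0b1010_1010 == 0b0101_0000
assert!(a.and_not(b).as_array().iter().all(|&x| x == 0b0101_0000));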
fn cmp_gt(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| {
    if self.as_array()[i] > other.as_array()[i] { u8::MAX } else { 0 }
});
U8x32::from(arr)
Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 7);
Self::from(I8x32::from(*self ^ sign_bit).cmp_gt(
I8x32::from(other ^ sign_bit)
))
fn shift_left<const BITS: usize>(&self) -> U8x32
fn shift_right<const BITS: usize>(&self) -> U8x32
fn unpack_lo(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let mut out = [0u8; 32];
for lane in 0..2 {
    // Lane 0 interleaves source bytes 0..8; lane 1 interleaves bytes 16..24.
    for i in 0..8 {
        out[lane * 16 + 2 * i] = self.as_array()[lane * 16 + i];
        out[lane * 16 + 2 * i + 1] = other.as_array()[lane * 16 + i];
    }
}
U8x32::from(out)
Avx2
- VPUNPCKLBW ymm, ymm, ymm
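Example
A hedged sketch (broadcast is assumed from SimdBase): the low eight bytes of each lane are interleaved self, other, self, other.
let a = U8x32::broadcast(0xAA);
let b = U8x32::broadcast(0xBB);
let lo = a.unpack_lo(b);
assert!(lo.as_array().chunks(2).all(|p| p[0] == 0xAA && p[1] == 0xBB));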
fn unpack_hi(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let mut out = [0u8; 32];
for lane in 0..2 {
    // Lane 0 interleaves source bytes 8..16; lane 1 interleaves bytes 24..32.
    for i in 0..8 {
        out[lane * 16 + 2 * i] = self.as_array()[lane * 16 + 8 + i];
        out[lane * 16 + 2 * i + 1] = other.as_array()[lane * 16 + 8 + i];
    }
}
U8x32::from(out)
Avx2
- VPUNPCKHBW ymm, ymm, ymm
fn max(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| self.as_array()[i].max(other.as_array()[i]));
U8x32::from(arr)
Avx2
- VPMAXUB ymm, ymm, ymm
fn min(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| self.as_array()[i].min(other.as_array()[i]));
U8x32::from(arr)
Avx2
- VPMINUB ymm, ymm, ymm
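Example
A hedged sketch (broadcast is assumed from SimdBase): both min and max compare bytes as unsigned values.
let a = U8x32::broadcast(10);
let b = U8x32::broadcast(200);
// 200 > 10 as unsigned bytes, even though 200u8 reinterpreted as i8 is negative.
assert!(a.max(b) == b);
assert!(a.min(b) == a);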
type BroadcastLoInput = U8x16
A vector of [Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())].
impl SimdBase8 for U8x32
fn shift_bytes_left<const AMOUNT: usize>(&self) -> U8x32
fn shift_bytes_right<const AMOUNT: usize>(&self) -> U8x32
impl SimdSaturatingArithmetic for U8x32
fn saturating_add(&self, other: U8x32) -> U8x32
impl Sub for U8x32
fn sub(self, rhs: U8x32) -> U8x32
Perform a pairwise wrapping_sub.
Scalar Equivalent:
let arr: [u8; 32] = std::array::from_fn(|i| {
    self.as_array()[i].wrapping_sub(rhs.as_array()[i])
});
U8x32::from(arr)
AVX2 Intrinsics Used
_mm256_sub_epi8 (VPSUBB ymm, ymm, ymm)
Neon Intrinsics Used
vsubq_u8 (compiles to SUB Vd.16B, Vn.16B, Vm.16B)
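Example
A hedged sketch (broadcast is assumed from SimdBase): underflow wraps modulo 256.
let zero = U8x32::broadcast(0);
let one = U8x32::broadcast(1);
// Each byte computes 0u8.wrapping_sub(1) == 255.
assert!((zero - one).as_array().iter().all(|&x| x == u8::MAX));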
impl SubAssign for U8x32
fn sub_assign(&mut self, other: U8x32)
Performs the -= operation.
impl Copy for U8x32
impl Eq for U8x32
impl Pod for U8x32
Auto Trait Implementations
impl Freeze for U8x32
impl RefUnwindSafe for U8x32
impl Send for U8x32
impl Sync for U8x32
impl Unpin for U8x32
impl UnwindSafe for U8x32
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T where T: AnyBitPattern
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits as &Self.
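Example
A hedged sketch relying on the Pod impl above and the bytemuck crate's bytes_of and pod_read_unaligned (broadcast is assumed from SimdBase): round-trip a vector through raw bytes.
let v = U8x32::broadcast(7);
let bytes: &[u8] = bytemuck::bytes_of(&v);
assert_eq!(bytes.len(), 32);
let back: U8x32 = bytemuck::pod_read_unaligned(bytes);
assert!(back == v);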