pub struct U8x16(/* private fields */);
[u8; 16] as a vector.
Implementations§
impl U8x16
pub fn carryless_mul_wide(self, b: Self) -> [Self; 2]
Perform a (full) 128-bit wide carryless multiply
The result of the 128-bit wide carryless multiply is 256 bits. It is returned as
two 128-bit values, [lower_bits, upper_bits].
If you’d like a single 256-bit value, it can be constructed like this:
let a = U8x16::from(3);
let b = U8x16::from(7);
let product: [U8x16; 2] = a.carryless_mul_wide(b);
let product: U8x32 = product.into();
(This function doesn’t always return a U8x32, since it will use __m128i for
computation on x86_64 machines, and it may be slower to always construct a __m256i.)
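For intuition (a hedged sketch, not part of the original docs): carryless multiplication treats its operands as polynomials over GF(2), so 3 ⊗ 7 = (x + 1)(x^2 + x + 1) = x^3 + 1 = 9. Assuming lane 0 is the least-significant byte of the 128-bit operand, the following should hold:
// Hedged sketch: the product of the small operands 3 and 7 is 9 and fits in the low byte.
let a = U8x16::from_array([3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let b = U8x16::from_array([7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let [lo, hi] = a.carryless_mul_wide(b);
assert_eq!(lo.as_array()[0], 9);
assert_eq!(hi.as_array(), [0; 16]); // nothing spills into the upper 128 bits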
impl U8x16
pub const fn from_array(arr: [u8; 16]) -> Self
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
§Example
const MY_EXTREMELY_FUN_VALUE: U8x16 = U8x16::from_array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as u8, value);
}
impl U8x16
pub fn shuffle(&self, order: U8x16) -> U8x16
§Scalar Equivalent:
let mut arr = [0; 16];
for (lane_dst, (lane_src, order)) in
arr.chunks_exact_mut(16).zip(
self.as_array().chunks_exact(16)
.zip(order.as_array().chunks_exact(16))
)
{
for (dst, idx) in lane_dst.iter_mut().zip(order) {
let idx = *idx;
*dst = if (idx >> 7) == 1 {
0
} else {
lane_src[(idx as usize) % 16]
};
}
}
arr.into()
§Avx2
- PSHUFB xmm, xmm
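As a usage illustration (a hedged sketch following the scalar semantics above), shuffle can reverse the byte order of a vector; any index with its high bit set would select zero instead:
// Hedged sketch: reverse the 16 bytes of a vector.
let v = U8x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let order = U8x16::from_array([15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
let reversed = v.shuffle(order);
assert_eq!(reversed.as_array()[0], 15);
assert_eq!(reversed.as_array()[15], 0);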
Trait Implementations§
impl Add for U8x16
fn add(self, rhs: U8x16) -> U8x16
Perform a pairwise wrapping_add
§Scalar Equivalent
U8x16::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
self.as_array()[8].wrapping_add(rhs.as_array()[8]),
self.as_array()[9].wrapping_add(rhs.as_array()[9]),
self.as_array()[10].wrapping_add(rhs.as_array()[10]),
self.as_array()[11].wrapping_add(rhs.as_array()[11]),
self.as_array()[12].wrapping_add(rhs.as_array()[12]),
self.as_array()[13].wrapping_add(rhs.as_array()[13]),
self.as_array()[14].wrapping_add(rhs.as_array()[14]),
self.as_array()[15].wrapping_add(rhs.as_array()[15]),
])
§AVX2 Intrinsics Used
- _mm_add_epi8: PADDB xmm, xmm
§Neon Intrinsics Used
- vaddq_u8
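For example (a hedged sketch of the wrapping behavior described above):
// Hedged sketch: per-byte addition wraps around on overflow.
let a = U8x16::from_array([250; 16]);
let b = U8x16::from_array([10; 16]);
assert_eq!((a + b).as_array(), [4; 16]); // 250 + 10 wraps to 4 in u8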
impl AddAssign for U8x16
fn add_assign(&mut self, other: U8x16)
Performs the += operation.
impl BitAnd for U8x16
fn bitand(self, rhs: U8x16) -> U8x16
Perform a pairwise bitwise and
§Scalar Equivalent
U8x16::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
self.as_array()[8] & rhs.as_array()[8],
self.as_array()[9] & rhs.as_array()[9],
self.as_array()[10] & rhs.as_array()[10],
self.as_array()[11] & rhs.as_array()[11],
self.as_array()[12] & rhs.as_array()[12],
self.as_array()[13] & rhs.as_array()[13],
self.as_array()[14] & rhs.as_array()[14],
self.as_array()[15] & rhs.as_array()[15],
])
§AVX2 Intrinsics Used
- _mm_and_si128: PAND xmm, xmm
§Neon Intrinsics Used
- vandq_u8
impl BitAndAssign for U8x16
fn bitand_assign(&mut self, other: U8x16)
Performs the &= operation.
impl BitOr for U8x16
fn bitor(self, rhs: U8x16) -> U8x16
Perform a pairwise bitwise or
§Scalar Equivalent
U8x16::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
self.as_array()[8] | rhs.as_array()[8],
self.as_array()[9] | rhs.as_array()[9],
self.as_array()[10] | rhs.as_array()[10],
self.as_array()[11] | rhs.as_array()[11],
self.as_array()[12] | rhs.as_array()[12],
self.as_array()[13] | rhs.as_array()[13],
self.as_array()[14] | rhs.as_array()[14],
self.as_array()[15] | rhs.as_array()[15],
])
§AVX2 Intrinsics Used
- _mm_or_si128: POR xmm, xmm
§Neon Intrinsics Used
- vorrq_u8
impl BitOrAssign for U8x16
fn bitor_assign(&mut self, other: U8x16)
Performs the |= operation.
impl BitXor for U8x16
fn bitxor(self, rhs: U8x16) -> U8x16
Perform a pairwise bitwise xor
§Scalar Equivalent
U8x16::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
self.as_array()[8] ^ rhs.as_array()[8],
self.as_array()[9] ^ rhs.as_array()[9],
self.as_array()[10] ^ rhs.as_array()[10],
self.as_array()[11] ^ rhs.as_array()[11],
self.as_array()[12] ^ rhs.as_array()[12],
self.as_array()[13] ^ rhs.as_array()[13],
self.as_array()[14] ^ rhs.as_array()[14],
self.as_array()[15] ^ rhs.as_array()[15],
])
§AVX2 Intrinsics Used
- _mm_xor_si128: PXOR xmm, xmm
§Neon Intrinsics Used
- veorq_u8
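For example (a hedged sketch): XOR-ing a vector with itself clears it, and XOR-ing with an all-ones mask flips every bit:
let v = U8x16::from_array([0xAA; 16]);
assert_eq!((v ^ v).as_array(), [0; 16]);
assert_eq!((v ^ U8x16::from_array([0xFF; 16])).as_array(), [0x55; 16]);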
impl BitXorAssign for U8x16
fn bitxor_assign(&mut self, other: U8x16)
Performs the ^= operation.
impl ConditionallySelectable for U8x16
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap self and other if choice == 1; otherwise, reassign both unto themselves.
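For example (a hedged sketch using subtle::Choice): conditional_select returns a when choice == 0 and b when choice == 1, without branching on the secret choice:
use subtle::Choice;

let a = U8x16::from_array([1; 16]);
let b = U8x16::from_array([2; 16]);
assert_eq!(U8x16::conditional_select(&a, &b, Choice::from(0u8)).as_array(), [1; 16]);
assert_eq!(U8x16::conditional_select(&a, &b, Choice::from(1u8)).as_array(), [2; 16]);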
impl ConstantTimeEq for U8x16
impl<'de> Deserialize<'de> for U8x16
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,
impl Distribution<U8x16> for Standard
impl ExtendingCast<U8x16> for U16x16
fn extending_cast_from(vector: U8x16) -> U16x16
§Scalar Equivalent:
U16x16::from([
u16::from(vector.as_array()[0]),
u16::from(vector.as_array()[1]),
u16::from(vector.as_array()[2]),
u16::from(vector.as_array()[3]),
u16::from(vector.as_array()[4]),
u16::from(vector.as_array()[5]),
u16::from(vector.as_array()[6]),
u16::from(vector.as_array()[7]),
u16::from(vector.as_array()[8]),
u16::from(vector.as_array()[9]),
u16::from(vector.as_array()[10]),
u16::from(vector.as_array()[11]),
u16::from(vector.as_array()[12]),
u16::from(vector.as_array()[13]),
u16::from(vector.as_array()[14]),
u16::from(vector.as_array()[15]),
])
§Avx2
- VPMOVZXBW ymm, xmm
impl ExtendingCast<U8x16> for U16x8
fn extending_cast_from(vector: U8x16) -> U16x8
§Scalar Equivalent:
U16x8::from([
u16::from(vector.as_array()[0]),
u16::from(vector.as_array()[1]),
u16::from(vector.as_array()[2]),
u16::from(vector.as_array()[3]),
u16::from(vector.as_array()[4]),
u16::from(vector.as_array()[5]),
u16::from(vector.as_array()[6]),
u16::from(vector.as_array()[7]),
])
§Avx2
- PMOVZXBW xmm, xmm
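For example (a hedged sketch, assuming U16x8::as_array behaves analogously to U8x16::as_array and that the ExtendingCast trait is in scope):
let v = U8x16::from_array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let wide = U16x8::extending_cast_from(v);
assert_eq!(wide.as_array(), [1, 2, 3, 4, 5, 6, 7, 8]); // only the low 8 bytes are widened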
impl ExtendingCast<U8x16> for U32x4
impl ExtendingCast<U8x16> for U32x8
fn extending_cast_from(vector: U8x16) -> U32x8
§Scalar Equivalent:
U32x8::from([
u32::from(vector.as_array()[0]),
u32::from(vector.as_array()[1]),
u32::from(vector.as_array()[2]),
u32::from(vector.as_array()[3]),
u32::from(vector.as_array()[4]),
u32::from(vector.as_array()[5]),
u32::from(vector.as_array()[6]),
u32::from(vector.as_array()[7]),
])
§Avx2
- VPMOVZXBD ymm, xmm
impl ExtendingCast<U8x16> for U64x2
impl ExtendingCast<U8x16> for U64x4
impl From<U8x16> for U16x16
fn from(vector: U8x16) -> U16x16
§Scalar Equivalent:
U16x16::from([
u16::from(vector.as_array()[0]),
u16::from(vector.as_array()[1]),
u16::from(vector.as_array()[2]),
u16::from(vector.as_array()[3]),
u16::from(vector.as_array()[4]),
u16::from(vector.as_array()[5]),
u16::from(vector.as_array()[6]),
u16::from(vector.as_array()[7]),
u16::from(vector.as_array()[8]),
u16::from(vector.as_array()[9]),
u16::from(vector.as_array()[10]),
u16::from(vector.as_array()[11]),
u16::from(vector.as_array()[12]),
u16::from(vector.as_array()[13]),
u16::from(vector.as_array()[14]),
u16::from(vector.as_array()[15]),
])
§Avx2
- VPMOVZXBW ymm, xmm
impl From<U8x16> for U8x32
fn from(vector: U8x16) -> U8x32
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
§Scalar Equivalent:
let mut out = [0; 32];
out[0..16].copy_from_slice(&vector.as_array());
U8x32::from(out)
§Avx2
impl Shl<u64> for U8x16
fn shl(self, amount: u64) -> U8x16
§Scalar Equivalent:
if amount >= 8 {
U8x16::ZERO
} else {
U8x16::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
self.as_array()[8] << amount,
self.as_array()[9] << amount,
self.as_array()[10] << amount,
self.as_array()[11] << amount,
self.as_array()[12] << amount,
self.as_array()[13] << amount,
self.as_array()[14] << amount,
self.as_array()[15] << amount,
])
}
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
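For example (a hedged sketch of the scalar equivalent above): every byte is shifted independently, and a shift count of 8 or more clears the vector:
let v = U8x16::from_array([0b0000_0101; 16]);
assert_eq!((v << 1u64).as_array(), [0b0000_1010; 16]);
assert_eq!((v << 8u64).as_array(), [0; 16]);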
impl ShlAssign<u64> for U8x16
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl ShlAssign for U8x16
fn shl_assign(&mut self, amount: U8x16)
Performs the <<= operation.
impl Shr<u64> for U8x16
fn shr(self, amount: u64) -> U8x16
§Scalar Equivalent:
if amount >= 8 {
U8x16::ZERO
} else {
U8x16::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
self.as_array()[8] >> amount,
self.as_array()[9] >> amount,
self.as_array()[10] >> amount,
self.as_array()[11] >> amount,
self.as_array()[12] >> amount,
self.as_array()[13] >> amount,
self.as_array()[14] >> amount,
self.as_array()[15] >> amount,
])
}
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl ShrAssign<u64> for U8x16
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl ShrAssign for U8x16
fn shr_assign(&mut self, amount: U8x16)
Performs the >>= operation.
impl SimdBase for U8x16
fn set_lo(scalar: u8) -> U8x16
fn broadcast_lo(vector: U8x16) -> U8x16
fn cmp_eq(&self, other: U8x16) -> U8x16
§Scalar Equivalent:
U8x16::from([
if self.as_array()[0] == other.as_array()[0] { u8::MAX } else { 0 },
if self.as_array()[1] == other.as_array()[1] { u8::MAX } else { 0 },
if self.as_array()[2] == other.as_array()[2] { u8::MAX } else { 0 },
if self.as_array()[3] == other.as_array()[3] { u8::MAX } else { 0 },
if self.as_array()[4] == other.as_array()[4] { u8::MAX } else { 0 },
if self.as_array()[5] == other.as_array()[5] { u8::MAX } else { 0 },
if self.as_array()[6] == other.as_array()[6] { u8::MAX } else { 0 },
if self.as_array()[7] == other.as_array()[7] { u8::MAX } else { 0 },
if self.as_array()[8] == other.as_array()[8] { u8::MAX } else { 0 },
if self.as_array()[9] == other.as_array()[9] { u8::MAX } else { 0 },
if self.as_array()[10] == other.as_array()[10] { u8::MAX } else { 0 },
if self.as_array()[11] == other.as_array()[11] { u8::MAX } else { 0 },
if self.as_array()[12] == other.as_array()[12] { u8::MAX } else { 0 },
if self.as_array()[13] == other.as_array()[13] { u8::MAX } else { 0 },
if self.as_array()[14] == other.as_array()[14] { u8::MAX } else { 0 },
if self.as_array()[15] == other.as_array()[15] { u8::MAX } else { 0 },
])
§Avx2
- PCMPEQB xmm, xmm
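For example (a hedged sketch): equal lanes become 0xFF and unequal lanes become 0x00, so the result can be used directly as a byte mask:
let a = U8x16::from_array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let b = U8x16::from_array([1, 0, 3, 0, 5, 0, 7, 0, 9, 0, 11, 0, 13, 0, 15, 0]);
let mask = a.cmp_eq(b);
assert_eq!(mask.as_array()[0], u8::MAX); // lanes 0, 2, 4, ... match
assert_eq!(mask.as_array()[1], 0);       // lanes 1, 3, 5, ... differ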
fn and_not(&self, other: U8x16) -> U8x16
§Scalar Equivalent:
U8x16::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
self.as_array()[8] & (!other.as_array()[8]),
self.as_array()[9] & (!other.as_array()[9]),
self.as_array()[10] & (!other.as_array()[10]),
self.as_array()[11] & (!other.as_array()[11]),
self.as_array()[12] & (!other.as_array()[12]),
self.as_array()[13] & (!other.as_array()[13]),
self.as_array()[14] & (!other.as_array()[14]),
self.as_array()[15] & (!other.as_array()[15]),
])
§Avx2
- PANDN xmm, xmm
fn cmp_gt(&self, other: U8x16) -> U8x16
§Scalar Equivalent:
U8x16::from([
if self.as_array()[0] > other.as_array()[0] { u8::MAX } else { 0 },
if self.as_array()[1] > other.as_array()[1] { u8::MAX } else { 0 },
if self.as_array()[2] > other.as_array()[2] { u8::MAX } else { 0 },
if self.as_array()[3] > other.as_array()[3] { u8::MAX } else { 0 },
if self.as_array()[4] > other.as_array()[4] { u8::MAX } else { 0 },
if self.as_array()[5] > other.as_array()[5] { u8::MAX } else { 0 },
if self.as_array()[6] > other.as_array()[6] { u8::MAX } else { 0 },
if self.as_array()[7] > other.as_array()[7] { u8::MAX } else { 0 },
if self.as_array()[8] > other.as_array()[8] { u8::MAX } else { 0 },
if self.as_array()[9] > other.as_array()[9] { u8::MAX } else { 0 },
if self.as_array()[10] > other.as_array()[10] { u8::MAX } else { 0 },
if self.as_array()[11] > other.as_array()[11] { u8::MAX } else { 0 },
if self.as_array()[12] > other.as_array()[12] { u8::MAX } else { 0 },
if self.as_array()[13] > other.as_array()[13] { u8::MAX } else { 0 },
if self.as_array()[14] > other.as_array()[14] { u8::MAX } else { 0 },
if self.as_array()[15] > other.as_array()[15] { u8::MAX } else { 0 },
])
§Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 7);
Self::from(I8x16::from(*self ^ sign_bit).cmp_gt(
I8x16::from(other ^ sign_bit)
))
fn shift_left<const BITS: usize>(&self) -> U8x16
fn shift_right<const BITS: usize>(&self) -> U8x16
fn unpack_lo(&self, other: U8x16) -> U8x16
§Scalar Equivalent:
U8x16::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
self.as_array()[4],
other.as_array()[4],
self.as_array()[5],
other.as_array()[5],
self.as_array()[6],
other.as_array()[6],
self.as_array()[7],
other.as_array()[7],
])
§Avx2
- PUNPCKLBW xmm, xmm
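For example (a hedged sketch): unpack_lo interleaves the low eight bytes of self with the low eight bytes of other:
let a = U8x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let b = U8x16::from_array([100; 16]);
let lo = a.unpack_lo(b);
assert_eq!(lo.as_array()[0], 0);
assert_eq!(lo.as_array()[1], 100);
assert_eq!(lo.as_array()[2], 1);
assert_eq!(lo.as_array()[3], 100);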
fn unpack_hi(&self, other: U8x16) -> U8x16
§Scalar Equivalent:
U8x16::from([
// Lane# 0
self.as_array()[8],
other.as_array()[8],
self.as_array()[9],
other.as_array()[9],
self.as_array()[10],
other.as_array()[10],
self.as_array()[11],
other.as_array()[11],
self.as_array()[12],
other.as_array()[12],
self.as_array()[13],
other.as_array()[13],
self.as_array()[14],
other.as_array()[14],
self.as_array()[15],
other.as_array()[15],
])
§Avx2
- PUNPCKHBW xmm, xmm
fn max(&self, other: U8x16) -> U8x16
§Scalar Equivalent:
U8x16::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
self.as_array()[8].max(other.as_array()[8]),
self.as_array()[9].max(other.as_array()[9]),
self.as_array()[10].max(other.as_array()[10]),
self.as_array()[11].max(other.as_array()[11]),
self.as_array()[12].max(other.as_array()[12]),
self.as_array()[13].max(other.as_array()[13]),
self.as_array()[14].max(other.as_array()[14]),
self.as_array()[15].max(other.as_array()[15]),
])
§Avx2
- PMAXUB xmm, xmm
fn min(&self, other: U8x16) -> U8x16
§Scalar Equivalent:
U8x16::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
self.as_array()[8].min(other.as_array()[8]),
self.as_array()[9].min(other.as_array()[9]),
self.as_array()[10].min(other.as_array()[10]),
self.as_array()[11].min(other.as_array()[11]),
self.as_array()[12].min(other.as_array()[12]),
self.as_array()[13].min(other.as_array()[13]),
self.as_array()[14].min(other.as_array()[14]),
self.as_array()[15].min(other.as_array()[15]),
])
§Avx2
- PMINUB xmm, xmm
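For example (a hedged sketch): max and min operate lane-by-lane on unsigned bytes:
let a = U8x16::from_array([10; 16]);
let b = U8x16::from_array([200; 16]);
assert_eq!(a.max(b).as_array(), [200; 16]);
assert_eq!(a.min(b).as_array(), [10; 16]);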
type BroadcastLoInput = U8x16
[Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())]
impl SimdBase8 for U8x16
fn shift_bytes_left<const AMOUNT: usize>(&self) -> U8x16
fn shift_bytes_right<const AMOUNT: usize>(&self) -> U8x16
impl SimdSaturatingArithmetic for U8x16
fn saturating_add(&self, other: U8x16) -> U8x16
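For example (a hedged sketch): unlike the wrapping Add implementation earlier on this page, saturating_add clamps at u8::MAX instead of wrapping around:
let a = U8x16::from_array([250; 16]);
let b = U8x16::from_array([10; 16]);
assert_eq!(a.saturating_add(b).as_array(), [u8::MAX; 16]);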
impl Sub for U8x16
fn sub(self, rhs: U8x16) -> U8x16
Perform a pairwise wrapping_sub
§Scalar Equivalent
U8x16::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
self.as_array()[8].wrapping_sub(rhs.as_array()[8]),
self.as_array()[9].wrapping_sub(rhs.as_array()[9]),
self.as_array()[10].wrapping_sub(rhs.as_array()[10]),
self.as_array()[11].wrapping_sub(rhs.as_array()[11]),
self.as_array()[12].wrapping_sub(rhs.as_array()[12]),
self.as_array()[13].wrapping_sub(rhs.as_array()[13]),
self.as_array()[14].wrapping_sub(rhs.as_array()[14]),
self.as_array()[15].wrapping_sub(rhs.as_array()[15]),
])
§AVX2 Intrinsics Used
- _mm_sub_epi8: PSUBB xmm, xmm
§Neon Intrinsics Used
- vsubq_u8
impl SubAssign for U8x16
fn sub_assign(&mut self, other: U8x16)
Performs the -= operation.
impl Copy for U8x16
impl Eq for U8x16
impl Pod for U8x16
Auto Trait Implementations§
impl Freeze for U8x16
impl RefUnwindSafe for U8x16
impl Send for U8x16
impl Sync for U8x16
impl Unpin for U8x16
impl UnwindSafe for U8x16
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits as &Self.