Struct vectoreyes::U8x32
#[repr(transparent)]
pub struct U8x32(_);
[u8; 32] as a vector.
Implementations
impl U8x32
pub const fn from_array(array: [u8; 32]) -> U8x32
Create a vector from an array. Unlike the From trait function, the from_array function is const.
Example
const MY_EXTREMELY_FUN_VALUE: U8x32 =
    U8x32::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as u8, value);
}
Avx2
Trait Implementations
impl Add<U8x32> for U8x32
fn add(self, rhs: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, r) in out.iter_mut().zip(rhs.as_array().iter().copied()) {
    *x = (*x).wrapping_add(r);
}
U8x32::from(out)
Avx2
- VPADDB ymm, ymm, ymm
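Example
A quick check of the wrapping behavior (an editor's sketch, using only the from_array and as_array methods shown above):

let a = U8x32::from_array([255; 32]);
let b = U8x32::from_array([2; 32]);
// Each lane wraps independently: 255 + 2 == 1 (mod 256).
assert_eq!((a + b).as_array(), [1; 32]);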
impl AddAssign<U8x32> for U8x32
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<U8x32> for U8x32
fn bitand(self, rhs: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, r) in out.iter_mut().zip(rhs.as_array().iter().copied()) {
    *x &= r;
}
U8x32::from(out)
Avx2
- VPAND ymm, ymm, ymm
impl BitAndAssign<U8x32> for U8x32
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<U8x32> for U8x32
fn bitor(self, rhs: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, r) in out.iter_mut().zip(rhs.as_array().iter().copied()) {
    *x |= r;
}
U8x32::from(out)
Avx2
- VPOR ymm, ymm, ymm
impl BitOrAssign<U8x32> for U8x32
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<U8x32> for U8x32
fn bitxor(self, rhs: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, r) in out.iter_mut().zip(rhs.as_array().iter().copied()) {
    *x ^= r;
}
U8x32::from(out)
Avx2
- VPXOR ymm, ymm, ymm
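Example
One sketch covering all three bitwise operators (editor's example; same API assumptions as above):

let a = U8x32::from_array([0b1100; 32]);
let b = U8x32::from_array([0b1010; 32]);
assert_eq!((a & b).as_array(), [0b1000; 32]);
assert_eq!((a | b).as_array(), [0b1110; 32]);
assert_eq!((a ^ b).as_array(), [0b0110; 32]);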
impl BitXorAssign<U8x32> for U8x32
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for U8x32
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
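Example
A constant-time selection sketch. This is an editor's example assuming the standard subtle crate semantics (Choice::from(0) selects a, Choice::from(1) selects b) and that subtle is available as a dependency:

use subtle::Choice;

let a = U8x32::from_array([1; 32]);
let b = U8x32::from_array([2; 32]);
// choice == 1 selects b; choice == 0 would select a.
let picked = U8x32::conditional_select(&a, &b, Choice::from(1));
assert_eq!(picked.as_array(), [2; 32]);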
impl ConstantTimeEq for U8x32
impl From<[U8x16; 2]> for U8x32
impl From<U8x16> for U8x32
fn from(vector: U8x16) -> U8x32
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 32];
out[0..16].copy_from_slice(&vector.as_array());
U8x32::from(out)
Avx2
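Example
A widening sketch (editor's example; it assumes U8x16 offers the same From<[u8; 16]> array conversion that U8x32 does):

let wide = U8x32::from(U8x16::from([7; 16]));
// The low 16 bytes are copied; the upper 16 bytes are zeroed.
assert_eq!(wide.as_array()[..16], [7; 16]);
assert_eq!(wide.as_array()[16..], [0; 16]);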
impl From<U8x32> for [U8x16; 2]
impl Shl<U8x32> for U8x32
fn shl(self, amount: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 8 {
0
} else {
*x << amm
};
}
U8x32::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
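Example
A per-lane shift sketch (editor's example):

let v = U8x32::from_array([1; 32]);
let mut amounts = [0; 32];
amounts[0] = 3;
amounts[1] = 9; // a shift amount of 8 or more zeroes that lane
let shifted = v << U8x32::from(amounts);
assert_eq!(shifted.as_array()[0], 8); // 1 << 3
assert_eq!(shifted.as_array()[1], 0); // zeroed, not wrapped
assert_eq!(shifted.as_array()[2], 1); // 1 << 0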
impl Shl<u64> for U8x32
fn shl(self, amount: u64) -> U8x32
Scalar Equivalent:
if amount >= 8 {
    U8x32::ZERO
} else {
    let mut out = self.as_array();
    for x in out.iter_mut() {
        *x <<= amount;
    }
    U8x32::from(out)
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
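Example
A uniform shift sketch (editor's example):

let v = U8x32::from_array([0b0000_0101; 32]);
assert_eq!((v << 1u64).as_array(), [0b0000_1010; 32]);
// Shifting by 8 or more yields the zero vector rather than panicking.
assert_eq!((v << 8u64).as_array(), [0; 32]);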
impl ShlAssign<U8x32> for U8x32
fn shl_assign(&mut self, amount: U8x32)
Performs the <<= operation.
impl ShlAssign<u64> for U8x32
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<U8x32> for U8x32
fn shr(self, amount: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 8 {
0
} else {
*x >> amm
};
}
U8x32::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl Shr<u64> for U8x32
fn shr(self, amount: u64) -> U8x32
Scalar Equivalent:
if amount >= 8 {
    U8x32::ZERO
} else {
    let mut out = self.as_array();
    for x in out.iter_mut() {
        *x >>= amount;
    }
    U8x32::from(out)
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl ShrAssign<U8x32> for U8x32
fn shr_assign(&mut self, amount: U8x32)
Performs the >>= operation.
impl ShrAssign<u64> for U8x32
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for U8x32
fn set_lo(scalar: u8) -> U8x32
Scalar Equivalent:
let mut out = [0; 32];
out[0] = scalar;
U8x32::from(out)
Avx2
- Instruction sequence.
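Example
A sketch (editor's example):

let v = U8x32::set_lo(42);
assert_eq!(v.as_array()[0], 42);
// All other lanes are zero.
assert_eq!(v.as_array()[1..], [0; 31]);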
fn broadcast_lo(vector: U8x16) -> U8x32
fn cmp_eq(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let mut out = [0; 32];
for i in 0..32 {
    out[i] = if self.as_array()[i] == other.as_array()[i] { u8::MAX } else { 0 };
}
U8x32::from(out)
Avx2
- VPCMPEQB ymm, ymm, ymm
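Example
A mask-producing sketch (editor's example):

let a = U8x32::from_array([5; 32]);
let mut arr = [5; 32];
arr[0] = 6;
let mask = a.cmp_eq(U8x32::from(arr));
assert_eq!(mask.as_array()[0], 0); // lanes differ
assert_eq!(mask.as_array()[1], u8::MAX); // lanes match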
fn and_not(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, r) in out.iter_mut().zip(other.as_array().iter().copied()) {
    *x &= !r;
}
U8x32::from(out)
Avx2
- VPANDN ymm, ymm, ymm
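Example
A bit-clearing sketch (editor's example):

let v = U8x32::from_array([0b1111_0000; 32]);
let mask = U8x32::from_array([0b1010_0000; 32]);
// and_not computes self & !other, i.e. it clears the bits set in mask.
assert_eq!(v.and_not(mask).as_array(), [0b0101_0000; 32]);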
fn cmp_gt(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let mut out = [0; 32];
for i in 0..32 {
    out[i] = if self.as_array()[i] > other.as_array()[i] { u8::MAX } else { 0 };
}
U8x32::from(out)
Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 7);
Self::from(I8x32::from(*self ^ sign_bit).cmp_gt(
I8x32::from(other ^ sign_bit)
))
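The sign-bit flip is needed because AVX2 only provides a signed byte comparison (VPCMPGTB). A sketch of the resulting unsigned semantics (editor's example):

let a = U8x32::from_array([200; 32]);
let b = U8x32::from_array([100; 32]);
// Unsigned compare: 200 > 100, even though 200 as an i8 is negative.
assert_eq!(a.cmp_gt(b).as_array(), [u8::MAX; 32]);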
fn shift_left<const BITS: usize>(&self) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
U8x32::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn shift_right<const BITS: usize>(&self) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
U8x32::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn unpack_lo(&self, other: U8x32) -> U8x32
Scalar Equivalent:
// Within each 128-bit lane, interleave the low 8 bytes of self and other.
let mut out = [0; 32];
for lane in 0..2 {
    for i in 0..8 {
        out[lane * 16 + 2 * i] = self.as_array()[lane * 16 + i];
        out[lane * 16 + 2 * i + 1] = other.as_array()[lane * 16 + i];
    }
}
U8x32::from(out)
Avx2
- VPUNPCKLBW ymm, ymm, ymm
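Example
A sketch showing that the interleave restarts at the second 128-bit lane (editor's example; std::array::from_fn is only used to build distinguishable inputs):

let a = U8x32::from_array(std::array::from_fn(|i| i as u8));
let b = U8x32::from_array(std::array::from_fn(|i| 100 + i as u8));
let lo = a.unpack_lo(b);
assert_eq!(lo.as_array()[0], 0); // self[0]
assert_eq!(lo.as_array()[1], 100); // other[0]
assert_eq!(lo.as_array()[16], 16); // self[16], not self[8]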
fn unpack_hi(&self, other: U8x32) -> U8x32
Scalar Equivalent:
// Within each 128-bit lane, interleave the high 8 bytes of self and other.
let mut out = [0; 32];
for lane in 0..2 {
    for i in 0..8 {
        out[lane * 16 + 2 * i] = self.as_array()[lane * 16 + 8 + i];
        out[lane * 16 + 2 * i + 1] = other.as_array()[lane * 16 + 8 + i];
    }
}
U8x32::from(out)
Avx2
- VPUNPCKHBW ymm, ymm, ymm
fn max(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, r) in out.iter_mut().zip(other.as_array().iter().copied()) {
    *x = (*x).max(r);
}
U8x32::from(out)
Avx2
- VPMAXUB ymm, ymm, ymm
fn min(&self, other: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, r) in out.iter_mut().zip(other.as_array().iter().copied()) {
    *x = (*x).min(r);
}
U8x32::from(out)
Avx2
- VPMINUB ymm, ymm, ymm
const ZERO: Self = _
type BroadcastLoInput = U8x16
impl SimdBase8 for U8x32
fn shift_bytes_left<const AMOUNT: usize>(&self) -> U8x32
Scalar Equivalent:
let mut out = [0; 32];
for (out_lane, src_lane) in out
.chunks_exact_mut(16)
.zip(self.as_array().chunks_exact(16))
{
out_lane[AMOUNT..].copy_from_slice(&src_lane[0..16 - AMOUNT]);
}
U8x32::from(out)
Avx2
- VPSLLDQ ymm, ymm, imm8
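Example
A byte-shift sketch (editor's example; from_fn just builds a distinguishable input):

let v = U8x32::from_array(std::array::from_fn(|i| (i + 1) as u8));
let s = v.shift_bytes_left::<1>();
// Bytes move within each 128-bit lane; nothing crosses the lane boundary.
assert_eq!(s.as_array()[0], 0); // a zero byte is shifted in
assert_eq!(s.as_array()[1], 1); // previously byte 0
assert_eq!(s.as_array()[16], 0); // lane 1 also gains a zero, not byte 15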
fn shift_bytes_right<const AMOUNT: usize>(&self) -> U8x32
Scalar Equivalent:
let mut out = [0; 32];
for (out_lane, src_lane) in out
.chunks_exact_mut(16)
.zip(self.as_array().chunks_exact(16))
{
out_lane[0..16 - AMOUNT].copy_from_slice(&src_lane[AMOUNT..]);
}
U8x32::from(out)
Avx2
- VPSRLDQ ymm, ymm, imm8
fn most_significant_bits(&self) -> u32
Scalar Equivalent:
let mut out: u32 = 0;
for (i, value) in self.as_array().iter().copied().enumerate() {
out |= u32::from((value as u8) >> 7) << i;
}
out
Avx2
- VPMOVMSKB r32, ymm
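Example
A movemask sketch (editor's example):

let mut arr = [0u8; 32];
arr[0] = 0x80; // MSB set in lane 0
arr[5] = 0xFF; // MSB set in lane 5
let v = U8x32::from(arr);
assert_eq!(v.most_significant_bits(), 0b10_0001); // bits 0 and 5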
impl Sub<U8x32> for U8x32
fn sub(self, rhs: U8x32) -> U8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, r) in out.iter_mut().zip(rhs.as_array().iter().copied()) {
    *x = (*x).wrapping_sub(r);
}
U8x32::from(out)
Avx2
- VPSUBB ymm, ymm, ymm
impl SubAssign<U8x32> for U8x32
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for U8x32
impl Eq for U8x32
impl Pod for U8x32
Auto Trait Implementations
impl RefUnwindSafe for U8x32
impl Send for U8x32
impl Sync for U8x32
impl Unpin for U8x32
impl UnwindSafe for U8x32
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits, except for the possible invalid bit patterns being checked during is_valid_bit_pattern.