Struct vectoreyes::I8x32

#[repr(transparent)]
pub struct I8x32(_);

[i8; 32] as a vector.
Implementations
impl I8x32

pub const fn from_array(array: [i8; 32]) -> I8x32
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
Example
const MY_EXTREMELY_FUN_VALUE: I8x32 =
    I8x32::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
    assert_eq!(i as i8, value);
}
Trait Implementations
impl Add<I8x32> for I8x32

fn add(self, rhs: I8x32) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, y) in out.iter_mut().zip(rhs.as_array()) {
    *x = x.wrapping_add(y);
}
I8x32::from(out)

Avx2
- VPADDB ymm, ymm, ymm
 
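For instance, a quick sanity check of the wrapping behavior (assuming I8x32 is importable from the crate root, as this page suggests):

use vectoreyes::I8x32;

let a = I8x32::from_array([127; 32]);
let b = I8x32::from_array([1; 32]);
// Each lane wraps independently: 127 + 1 wraps around to -128.
assert_eq!((a + b).as_array(), [-128; 32]);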
impl AddAssign<I8x32> for I8x32

fn add_assign(&mut self, rhs: Self)
Performs the += operation.

impl BitAnd<I8x32> for I8x32

fn bitand(self, rhs: I8x32) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, y) in out.iter_mut().zip(rhs.as_array()) {
    *x &= y;
}
I8x32::from(out)

Avx2
- VPAND ymm, ymm, ymm
 
impl BitAndAssign<I8x32> for I8x32

fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.

impl BitOr<I8x32> for I8x32

fn bitor(self, rhs: I8x32) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, y) in out.iter_mut().zip(rhs.as_array()) {
    *x |= y;
}
I8x32::from(out)

Avx2
- VPOR ymm, ymm, ymm
 
impl BitOrAssign<I8x32> for I8x32

fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.

impl BitXor<I8x32> for I8x32

fn bitxor(self, rhs: I8x32) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, y) in out.iter_mut().zip(rhs.as_array()) {
    *x ^= y;
}
I8x32::from(out)

Avx2
- VPXOR ymm, ymm, ymm
 
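All three bitwise operators behave identically lane by lane; a small sketch (same import assumption as above):

use vectoreyes::I8x32;

let a = I8x32::from_array([0b0101; 32]);
let b = I8x32::from_array([0b0011; 32]);
assert_eq!((a & b).as_array(), [0b0001; 32]);
assert_eq!((a | b).as_array(), [0b0111; 32]);
assert_eq!((a ^ b).as_array(), [0b0110; 32]);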
impl BitXorAssign<I8x32> for I8x32

fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.

impl ConditionallySelectable for I8x32

fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self

fn conditional_assign(&mut self, other: &Self, choice: Choice)
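Choice comes from the subtle crate. A minimal sketch, assuming subtle is available as a direct dependency:

use subtle::{Choice, ConditionallySelectable};
use vectoreyes::I8x32;

let a = I8x32::from_array([1; 32]);
let b = I8x32::from_array([2; 32]);
// choice == 0 selects a; choice == 1 selects b, without branching on the bit.
assert_eq!(I8x32::conditional_select(&a, &b, Choice::from(0)).as_array(), [1; 32]);
assert_eq!(I8x32::conditional_select(&a, &b, Choice::from(1)).as_array(), [2; 32]);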
impl ConstantTimeEq for I8x32

impl From<[I8x16; 2]> for I8x32

impl From<I8x16> for I8x32

fn from(vector: I8x16) -> I8x32
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 32];
out[0..16].copy_from_slice(&vector.as_array());
I8x32::from(out)
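A sketch of the zero-extension behavior (assuming I8x16 offers the same from_array constructor as I8x32):

use vectoreyes::{I8x16, I8x32};

let narrow = I8x16::from_array([1; 16]);
let wide = I8x32::from(narrow);
let mut expected = [0i8; 32];
expected[..16].fill(1);
// The low 16 lanes carry the source; the high 16 lanes are zeroed.
assert_eq!(wide.as_array(), expected);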
impl From<I8x32> for [I8x16; 2]

impl Shl<I8x32> for I8x32

fn shl(self, amount: I8x32) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
    *x = if amm >= 8 || amm < 0 {
        0
    } else {
        *x << amm
    };
}
I8x32::from(out)
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl Shl<u64> for I8x32

fn shl(self, amount: u64) -> I8x32
Scalar Equivalent:
if amount >= 8 {
    I8x32::ZERO
} else {
    let mut out = self.as_array();
    for x in out.iter_mut() {
        *x <<= amount;
    }
    I8x32::from(out)
}
WARNING: this implementation is a polyfill which executes the scalar implementation.
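A quick check of both branches of the scalar equivalent:

use vectoreyes::I8x32;

let v = I8x32::from_array([1; 32]);
assert_eq!((v << 2u64).as_array(), [4; 32]);
// Shifting by the lane width (8 bits) or more clears every lane.
assert_eq!((v << 8u64).as_array(), [0; 32]);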
impl ShlAssign<I8x32> for I8x32

fn shl_assign(&mut self, amount: I8x32)
Performs the <<= operation.

impl ShlAssign<u64> for I8x32

fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.

impl Shr<I8x32> for I8x32

fn shr(self, amount: I8x32) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
    *x = if amm >= 8 || amm < 0 {
        if *x < 0 { -1 } else { 0 }
    } else {
        *x >> amm
    };
}
I8x32::from(out)
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl Shr<u64> for I8x32

fn shr(self, amount: u64) -> I8x32
Scalar Equivalent:
if amount >= 8 {
    // Arithmetic shift: every lane saturates to its sign (-1 or 0).
    let mut out = self.as_array();
    for x in out.iter_mut() {
        *x = if *x < 0 { -1 } else { 0 };
    }
    I8x32::from(out)
} else {
    let mut out = self.as_array();
    for x in out.iter_mut() {
        *x >>= amount;
    }
    I8x32::from(out)
}
WARNING: this implementation is a polyfill which executes the scalar implementation.
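Unlike the left shift, this is an arithmetic shift, so negative lanes saturate to -1 rather than 0. A quick check:

use vectoreyes::I8x32;

let v = I8x32::from_array([-64; 32]);
// The sign bit is replicated: -64 >> 2 == -16.
assert_eq!((v >> 2u64).as_array(), [-16; 32]);
// Shifting by 8 or more leaves only the sign: -1 for negative lanes.
assert_eq!((v >> 8u64).as_array(), [-1; 32]);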
impl ShrAssign<I8x32> for I8x32

fn shr_assign(&mut self, amount: I8x32)
Performs the >>= operation.

impl ShrAssign<u64> for I8x32

fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.

impl SimdBase for I8x32

fn set_lo(scalar: i8) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
out[0] = scalar;
I8x32::from(out)

Avx2
- Instruction sequence.
 
fn broadcast_lo(vector: I8x16) -> I8x32

fn cmp_eq(&self, other: I8x32) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for i in 0..32 {
    out[i] = if self.as_array()[i] == other.as_array()[i] { -1 } else { 0 };
}
I8x32::from(out)

Avx2
- VPCMPEQB ymm, ymm, ymm
 
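The all-ones (-1) encoding makes the result directly usable as a lane mask. A sketch (SimdBase import path assumed from this crate):

use vectoreyes::{I8x32, SimdBase};

let a = I8x32::from_array([5; 32]);
let mut arr = [5i8; 32];
arr[0] = 6;
let mask = a.cmp_eq(I8x32::from(arr));
// Lane 0 differs, so it is 0; every other lane is -1 (all bits set).
assert_eq!(mask.as_array()[0], 0);
assert_eq!(mask.as_array()[1], -1);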
fn and_not(&self, other: I8x32) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for i in 0..32 {
    out[i] = self.as_array()[i] & !other.as_array()[i];
}
I8x32::from(out)

Avx2
- VPANDN ymm, ymm, ymm
 
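Note the operand order: and_not computes self & !other, whereas the VPANDN instruction negates its first source, so the backend presumably passes the operands swapped. A small sketch:

use vectoreyes::{I8x32, SimdBase};

let value = I8x32::from_array([0b1111; 32]);
let mask = I8x32::from_array([0b0101; 32]);
// Clears the bits of `value` that are set in `mask`: 0b1111 & !0b0101 == 0b1010.
assert_eq!(value.and_not(mask).as_array(), [0b1010; 32]);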
fn cmp_gt(&self, other: I8x32) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for i in 0..32 {
    out[i] = if self.as_array()[i] > other.as_array()[i] { -1 } else { 0 };
}
I8x32::from(out)

Avx2
- VPCMPGTB ymm, ymm, ymm
 
fn shift_left<const BITS: usize>(&self) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
    *x <<= BITS;
}
I8x32::from(out)
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn shift_right<const BITS: usize>(&self) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
    *x >>= BITS;
}
I8x32::from(out)
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn unpack_lo(&self, other: I8x32) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for lane in 0..2 {
    for i in 0..8 {
        // Interleave the low 8 bytes of each 128-bit lane.
        out[lane * 16 + 2 * i] = self.as_array()[lane * 16 + i];
        out[lane * 16 + 2 * i + 1] = other.as_array()[lane * 16 + i];
    }
}
I8x32::from(out)

Avx2
- VPUNPCKLBW ymm, ymm, ymm
 
fn unpack_hi(&self, other: I8x32) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for lane in 0..2 {
    for i in 0..8 {
        // Interleave the high 8 bytes of each 128-bit lane.
        out[lane * 16 + 2 * i] = self.as_array()[lane * 16 + 8 + i];
        out[lane * 16 + 2 * i + 1] = other.as_array()[lane * 16 + 8 + i];
    }
}
I8x32::from(out)

Avx2
- VPUNPCKHBW ymm, ymm, ymm
 
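Both unpack methods work within each 128-bit lane (mirroring VPUNPCKLBW/VPUNPCKHBW), so the result is not a straight interleave of all 32 bytes. A sketch illustrating the lane split:

use vectoreyes::{I8x32, SimdBase};

let evens = I8x32::from_array(std::array::from_fn(|i| 2 * i as i8));
let odds = I8x32::from_array(std::array::from_fn(|i| 2 * i as i8 + 1));
let lo = evens.unpack_lo(odds);
assert_eq!(lo.as_array()[0], 0);   // evens[0]
assert_eq!(lo.as_array()[1], 1);   // odds[0]
// The second 128-bit lane restarts from index 16, not index 8:
assert_eq!(lo.as_array()[16], 32); // evens[16]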
fn max(&self, other: I8x32) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for i in 0..32 {
    out[i] = self.as_array()[i].max(other.as_array()[i]);
}
I8x32::from(out)

Avx2
- VPMAXSB ymm, ymm, ymm
 
fn min(&self, other: I8x32) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for i in 0..32 {
    out[i] = self.as_array()[i].min(other.as_array()[i]);
}
I8x32::from(out)

Avx2
- VPMINSB ymm, ymm, ymm
 
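min and max compose into a branch-free lane-wise clamp, for example:

use vectoreyes::{I8x32, SimdBase};

let v = I8x32::from_array(std::array::from_fn(|i| i as i8 * 4 - 60));
// Clamp every lane to [-50, 50]: max applies the lower bound, min the upper.
let clamped = v.max(I8x32::from_array([-50; 32])).min(I8x32::from_array([50; 32]));
assert!(clamped.as_array().iter().all(|&x| (-50..=50).contains(&x)));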
const ZERO: Self = _
type BroadcastLoInput = I8x16
impl SimdBase8 for I8x32

fn shift_bytes_left<const AMOUNT: usize>(&self) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for (out_lane, src_lane) in out
    .chunks_exact_mut(16)
    .zip(self.as_array().chunks_exact(16))
{
    out_lane[AMOUNT..].copy_from_slice(&src_lane[0..16 - AMOUNT]);
}
I8x32::from(out)

Avx2
- VPSLLDQ ymm, ymm, imm8
 
fn shift_bytes_right<const AMOUNT: usize>(&self) -> I8x32
Scalar Equivalent:
let mut out = [0; 32];
for (out_lane, src_lane) in out
    .chunks_exact_mut(16)
    .zip(self.as_array().chunks_exact(16))
{
    out_lane[0..16 - AMOUNT].copy_from_slice(&src_lane[AMOUNT..]);
}
I8x32::from(out)

Avx2
- VPSRLDQ ymm, ymm, imm8
 
fn most_significant_bits(&self) -> u32
Scalar Equivalent:
let mut out: u32 = 0;
for (i, value) in self.as_array().iter().copied().enumerate() {
    out |= u32::from((value as u8) >> 7) << i;
}
out

Avx2
- VPMOVMSKB r32, ymm
 
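One bit per lane, taken from each lane's sign bit, in the style of VPMOVMSKB. A minimal sketch:

use vectoreyes::{I8x32, SimdBase8};

let mut arr = [1i8; 32];
arr[3] = -1;
arr[31] = -128;
let v = I8x32::from(arr);
// Bit i of the result is the sign bit of lane i.
assert_eq!(v.most_significant_bits(), (1u32 << 3) | (1u32 << 31));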
impl Sub<I8x32> for I8x32

fn sub(self, rhs: I8x32) -> I8x32
Scalar Equivalent:
let mut out = self.as_array();
for (x, y) in out.iter_mut().zip(rhs.as_array()) {
    *x = x.wrapping_sub(y);
}
I8x32::from(out)

Avx2
- VPSUBB ymm, ymm, ymm
 
impl SubAssign<I8x32> for I8x32

fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.

impl Copy for I8x32
impl Eq for I8x32
impl Pod for I8x32
Auto Trait Implementations
impl RefUnwindSafe for I8x32
impl Send for I8x32
impl Sync for I8x32
impl Unpin for I8x32
impl UnwindSafe for I8x32
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,

type Bits = T

Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.