Struct vectoreyes::I8x16
#[repr(transparent)]
pub struct I8x16(_);
[i8; 16] as a vector.
Implementations
impl I8x16
pub const fn from_array(array: [i8; 16]) -> I8x16
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
Example
const MY_EXTREMELY_FUN_VALUE: I8x16 =
I8x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as i8, value);
}
Avx2
Trait Implementations
impl Add<I8x16> for I8x16
fn add(self, rhs: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
self.as_array()[8].wrapping_add(rhs.as_array()[8]),
self.as_array()[9].wrapping_add(rhs.as_array()[9]),
self.as_array()[10].wrapping_add(rhs.as_array()[10]),
self.as_array()[11].wrapping_add(rhs.as_array()[11]),
self.as_array()[12].wrapping_add(rhs.as_array()[12]),
self.as_array()[13].wrapping_add(rhs.as_array()[13]),
self.as_array()[14].wrapping_add(rhs.as_array()[14]),
self.as_array()[15].wrapping_add(rhs.as_array()[15]),
])
Avx2
- PADDB xmm, xmm
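Like the scalar equivalent above, lane addition wraps on overflow. A minimal sketch (lane values chosen arbitrarily):
let a = I8x16::from_array([i8::MAX; 16]);
let b = I8x16::from_array([1; 16]);
// 127 + 1 wraps to -128 in every lane.
assert_eq!((a + b).as_array(), [i8::MIN; 16]);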
impl AddAssign<I8x16> for I8x16
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<I8x16> for I8x16
fn bitand(self, rhs: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
self.as_array()[8] & rhs.as_array()[8],
self.as_array()[9] & rhs.as_array()[9],
self.as_array()[10] & rhs.as_array()[10],
self.as_array()[11] & rhs.as_array()[11],
self.as_array()[12] & rhs.as_array()[12],
self.as_array()[13] & rhs.as_array()[13],
self.as_array()[14] & rhs.as_array()[14],
self.as_array()[15] & rhs.as_array()[15],
])
Avx2
- PAND xmm, xmm
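For example, & can mask each lane down to its low nibble (an illustrative sketch):
let x = I8x16::from_array([0b0101_1010; 16]);
let mask = I8x16::from_array([0x0f; 16]);
// Only the low four bits of each lane survive the mask.
assert_eq!((x & mask).as_array(), [0b0000_1010; 16]);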
impl BitAndAssign<I8x16> for I8x16
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<I8x16> for I8x16
fn bitor(self, rhs: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
self.as_array()[8] | rhs.as_array()[8],
self.as_array()[9] | rhs.as_array()[9],
self.as_array()[10] | rhs.as_array()[10],
self.as_array()[11] | rhs.as_array()[11],
self.as_array()[12] | rhs.as_array()[12],
self.as_array()[13] | rhs.as_array()[13],
self.as_array()[14] | rhs.as_array()[14],
self.as_array()[15] | rhs.as_array()[15],
])
Avx2
- POR xmm, xmm
impl BitOrAssign<I8x16> for I8x16
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<I8x16> for I8x16
fn bitxor(self, rhs: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
self.as_array()[8] ^ rhs.as_array()[8],
self.as_array()[9] ^ rhs.as_array()[9],
self.as_array()[10] ^ rhs.as_array()[10],
self.as_array()[11] ^ rhs.as_array()[11],
self.as_array()[12] ^ rhs.as_array()[12],
self.as_array()[13] ^ rhs.as_array()[13],
self.as_array()[14] ^ rhs.as_array()[14],
self.as_array()[15] ^ rhs.as_array()[15],
])
Avx2
- PXOR xmm, xmm
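As with scalar ^, a vector XORed with itself is all zeroes (a quick sanity check):
let x = I8x16::from_array([0x12; 16]);
// Every lane cancels with itself.
assert_eq!((x ^ x).as_array(), [0; 16]);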
impl BitXorAssign<I8x16> for I8x16
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for I8x16
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
impl ConstantTimeEq for I8x16
impl ExtendingCast<I8x16> for I16x16
fn extending_cast_from(vector: I8x16) -> I16x16
Scalar Equivalent:
I16x16::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
i16::from(vector.as_array()[8]),
i16::from(vector.as_array()[9]),
i16::from(vector.as_array()[10]),
i16::from(vector.as_array()[11]),
i16::from(vector.as_array()[12]),
i16::from(vector.as_array()[13]),
i16::from(vector.as_array()[14]),
i16::from(vector.as_array()[15]),
])
Avx2
- VPMOVSXBW ymm, xmm
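The cast sign-extends each lane, so negative values stay negative; the narrower ExtendingCast impls below behave the same way on the lanes they keep. An illustrative sketch (assuming I16x16 exposes the same as_array accessor):
let v = I8x16::from_array([-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16]);
let w = I16x16::extending_cast_from(v);
// -1_i8 sign-extends to -1_i16; positive lanes are unchanged.
assert_eq!(w.as_array()[0], -1_i16);
assert_eq!(w.as_array()[15], 16_i16);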
impl ExtendingCast<I8x16> for I16x8
fn extending_cast_from(vector: I8x16) -> I16x8
Scalar Equivalent:
I16x8::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
])
Avx2
- PMOVSXBW xmm, xmm
impl ExtendingCast<I8x16> for I32x4
fn extending_cast_from(vector: I8x16) -> I32x4
Scalar Equivalent:
I32x4::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
])
Avx2
- PMOVSXBD xmm, xmm
impl ExtendingCast<I8x16> for I32x8
fn extending_cast_from(vector: I8x16) -> I32x8
Scalar Equivalent:
I32x8::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
i32::from(vector.as_array()[4]),
i32::from(vector.as_array()[5]),
i32::from(vector.as_array()[6]),
i32::from(vector.as_array()[7]),
])
Avx2
- VPMOVSXBD ymm, xmm
impl ExtendingCast<I8x16> for I64x2
fn extending_cast_from(vector: I8x16) -> I64x2
Scalar Equivalent:
I64x2::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
])
Avx2
- PMOVSXBQ xmm, xmm
impl ExtendingCast<I8x16> for I64x4
fn extending_cast_from(vector: I8x16) -> I64x4
Scalar Equivalent:
I64x4::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
i64::from(vector.as_array()[2]),
i64::from(vector.as_array()[3]),
])
Avx2
- VPMOVSXBQ ymm, xmm
impl From<I8x16> for I16x16
fn from(vector: I8x16) -> I16x16
Scalar Equivalent:
I16x16::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
i16::from(vector.as_array()[8]),
i16::from(vector.as_array()[9]),
i16::from(vector.as_array()[10]),
i16::from(vector.as_array()[11]),
i16::from(vector.as_array()[12]),
i16::from(vector.as_array()[13]),
i16::from(vector.as_array()[14]),
i16::from(vector.as_array()[15]),
])
Avx2
- VPMOVSXBW ymm, xmm
impl From<I8x16> for I8x32
fn from(vector: I8x16) -> I8x32
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 32];
out[0..16].copy_from_slice(&vector.as_array());
I8x32::from(out)
Avx2
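A sketch of the zeroing behavior described in the note above (assuming I8x32 exposes the same as_array accessor):
let v = I8x16::from_array([7; 16]);
let wide = I8x32::from(v);
assert_eq!(wide.as_array()[0], 7);
// The upper 16 lanes are zeroed, not left undefined.
assert_eq!(wide.as_array()[16], 0);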
impl Shl<I8x16> for I8x16
fn shl(self, amount: I8x16) -> I8x16
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 8 || amm < 0 {
0
} else {
*x << amm
};
}
I8x16::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
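A sketch of the per-lane behavior, including the out-of-range case (lane values chosen arbitrarily):
let x = I8x16::from_array([1; 16]);
let mut amounts = [0_i8; 16];
amounts[0] = 1;
amounts[1] = 8; // out of range: the lane becomes 0
let shifted = x << I8x16::from_array(amounts);
assert_eq!(shifted.as_array()[0], 2);
assert_eq!(shifted.as_array()[1], 0);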
impl Shl<u64> for I8x16
fn shl(self, amount: u64) -> I8x16
Scalar Equivalent:
if amount >= 8 {
I8x16::ZERO
} else {
I8x16::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
self.as_array()[8] << amount,
self.as_array()[9] << amount,
self.as_array()[10] << amount,
self.as_array()[11] << amount,
self.as_array()[12] << amount,
self.as_array()[13] << amount,
self.as_array()[14] << amount,
self.as_array()[15] << amount,
])
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl ShlAssign<I8x16> for I8x16
fn shl_assign(&mut self, amount: I8x16)
Performs the <<= operation.
impl ShlAssign<u64> for I8x16
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<I8x16> for I8x16
fn shr(self, amount: I8x16) -> I8x16
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 8 || amm < 0 {
if *x < 0 { -1 } else { 0 }
} else {
*x >> amm
};
}
I8x16::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl Shr<u64> for I8x16
fn shr(self, amount: u64) -> I8x16
Scalar Equivalent:
if amount >= 8 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I8x16::from(out)
} else {
I8x16::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
self.as_array()[8] >> amount,
self.as_array()[9] >> amount,
self.as_array()[10] >> amount,
self.as_array()[11] >> amount,
self.as_array()[12] >> amount,
self.as_array()[13] >> amount,
self.as_array()[14] >> amount,
self.as_array()[15] >> amount,
])
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
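A sketch of the arithmetic (sign-preserving) shift behavior (lane values chosen arbitrarily):
let x = I8x16::from_array([-2; 16]);
// Arithmetic shift fills with the sign bit.
assert_eq!((x >> 1_u64).as_array(), [-1; 16]);
// Amounts >= 8 saturate to the sign fill.
assert_eq!((x >> 8_u64).as_array(), [-1; 16]);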
impl ShrAssign<I8x16> for I8x16
fn shr_assign(&mut self, amount: I8x16)
Performs the >>= operation.
impl ShrAssign<u64> for I8x16
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for I8x16
fn set_lo(scalar: i8) -> I8x16
Scalar Equivalent:
let mut out = [0; 16];
out[0] = scalar;
I8x16::from(out)
Avx2
- Instruction sequence.
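A quick sanity check (illustrative):
let v = I8x16::set_lo(42);
assert_eq!(v.as_array()[0], 42);
// Every other lane is zeroed.
assert_eq!(v.as_array()[1], 0);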
fn broadcast_lo(vector: I8x16) -> I8x16
fn cmp_eq(&self, other: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] == other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] == other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] == other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] == other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] == other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] == other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] == other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] == other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] == other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] == other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] == other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] == other.as_array()[15] { -1 } else { 0 },
])
Avx2
- PCMPEQB xmm, xmm
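A sketch showing the all-ones/all-zeroes mask convention (lane values chosen arbitrarily):
let a = I8x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let mut b = a;
b += I8x16::set_lo(1); // perturb lane 0 only
let mask = a.cmp_eq(b);
assert_eq!(mask.as_array()[0], 0);  // lane 0 differs
assert_eq!(mask.as_array()[1], -1); // equal lanes are all ones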
fn and_not(&self, other: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
self.as_array()[8] & (!other.as_array()[8]),
self.as_array()[9] & (!other.as_array()[9]),
self.as_array()[10] & (!other.as_array()[10]),
self.as_array()[11] & (!other.as_array()[11]),
self.as_array()[12] & (!other.as_array()[12]),
self.as_array()[13] & (!other.as_array()[13]),
self.as_array()[14] & (!other.as_array()[14]),
self.as_array()[15] & (!other.as_array()[15]),
])
Avx2
- PANDN xmm, xmm
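Note the operand order: and_not computes self & !other, i.e. it clears the bits that are set in other (an illustrative sketch):
let x = I8x16::from_array([0b0110; 16]);
let y = I8x16::from_array([0b0010; 16]);
// 0b0110 & !0b0010 == 0b0100 in every lane.
assert_eq!(x.and_not(y).as_array(), [0b0100; 16]);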
fn cmp_gt(&self, other: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] > other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] > other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] > other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] > other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] > other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] > other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] > other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] > other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] > other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] > other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] > other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] > other.as_array()[15] { -1 } else { 0 },
])
Avx2
- PCMPGTB xmm, xmm
fn shift_left<const BITS: usize>(&self) -> I8x16
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
I8x16::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn shift_right<const BITS: usize>(&self) -> I8x16
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
I8x16::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn unpack_lo(&self, other: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
self.as_array()[4],
other.as_array()[4],
self.as_array()[5],
other.as_array()[5],
self.as_array()[6],
other.as_array()[6],
self.as_array()[7],
other.as_array()[7],
])
Avx2
- PUNPCKLBW xmm, xmm
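A sketch of the interleaving (lane values chosen to make the pattern visible):
let a = I8x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let b = I8x16::from_array([16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]);
// Bytes from the low halves of self and other alternate.
assert_eq!(&a.unpack_lo(b).as_array()[..4], &[0, 16, 1, 17]);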
fn unpack_hi(&self, other: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
// Lane# 0
self.as_array()[8],
other.as_array()[8],
self.as_array()[9],
other.as_array()[9],
self.as_array()[10],
other.as_array()[10],
self.as_array()[11],
other.as_array()[11],
self.as_array()[12],
other.as_array()[12],
self.as_array()[13],
other.as_array()[13],
self.as_array()[14],
other.as_array()[14],
self.as_array()[15],
other.as_array()[15],
])
Avx2
- PUNPCKHBW xmm, xmm
fn max(&self, other: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
self.as_array()[8].max(other.as_array()[8]),
self.as_array()[9].max(other.as_array()[9]),
self.as_array()[10].max(other.as_array()[10]),
self.as_array()[11].max(other.as_array()[11]),
self.as_array()[12].max(other.as_array()[12]),
self.as_array()[13].max(other.as_array()[13]),
self.as_array()[14].max(other.as_array()[14]),
self.as_array()[15].max(other.as_array()[15]),
])
Avx2
- PMAXSB xmm, xmm
fn min(&self, other: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
self.as_array()[8].min(other.as_array()[8]),
self.as_array()[9].min(other.as_array()[9]),
self.as_array()[10].min(other.as_array()[10]),
self.as_array()[11].min(other.as_array()[11]),
self.as_array()[12].min(other.as_array()[12]),
self.as_array()[13].min(other.as_array()[13]),
self.as_array()[14].min(other.as_array()[14]),
self.as_array()[15].min(other.as_array()[15]),
])
Avx2
- PMINSB xmm, xmm
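Both max and min compare lanes as signed bytes (a quick sanity check):
let a = I8x16::from_array([-3; 16]);
let b = I8x16::from_array([5; 16]);
assert_eq!(a.max(b).as_array(), [5; 16]);
assert_eq!(a.min(b).as_array(), [-3; 16]);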
const ZERO: Self = _
type BroadcastLoInput = I8x16
impl SimdBase8 for I8x16
fn shift_bytes_left<const AMOUNT: usize>(&self) -> I8x16
Scalar Equivalent:
let mut out = [0; 16];
for (out_lane, src_lane) in out
.chunks_exact_mut(16)
.zip(self.as_array().chunks_exact(16))
{
out_lane[AMOUNT..].copy_from_slice(&src_lane[0..16 - AMOUNT]);
}
I8x16::from(out)
Avx2
- PSLLDQ xmm, imm8
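A sketch with AMOUNT = 2: bytes move toward higher indices, and zeroes fill in from the bottom:
let v = I8x16::from_array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let shifted = v.shift_bytes_left::<2>();
assert_eq!(&shifted.as_array()[..4], &[0, 0, 1, 2]);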
fn shift_bytes_right<const AMOUNT: usize>(&self) -> I8x16
Scalar Equivalent:
let mut out = [0; 16];
for (out_lane, src_lane) in out
.chunks_exact_mut(16)
.zip(self.as_array().chunks_exact(16))
{
out_lane[0..16 - AMOUNT].copy_from_slice(&src_lane[AMOUNT..]);
}
I8x16::from(out)
Avx2
- PSRLDQ xmm, imm8
fn most_significant_bits(&self) -> u32
Scalar Equivalent:
let mut out: u32 = 0;
for (i, value) in self.as_array().iter().copied().enumerate() {
out |= u32::from((value as u8) >> 7) << i;
}
out
Avx2
- PMOVMSKB r32, xmm
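Each lane's sign bit becomes one bit of the result (an illustrative sketch):
let v = I8x16::from_array([-1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
// Lanes 0 and 2 are negative, so bits 0 and 2 are set.
assert_eq!(v.most_significant_bits(), 0b101);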
impl Sub<I8x16> for I8x16
fn sub(self, rhs: I8x16) -> I8x16
Scalar Equivalent:
I8x16::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
self.as_array()[8].wrapping_sub(rhs.as_array()[8]),
self.as_array()[9].wrapping_sub(rhs.as_array()[9]),
self.as_array()[10].wrapping_sub(rhs.as_array()[10]),
self.as_array()[11].wrapping_sub(rhs.as_array()[11]),
self.as_array()[12].wrapping_sub(rhs.as_array()[12]),
self.as_array()[13].wrapping_sub(rhs.as_array()[13]),
self.as_array()[14].wrapping_sub(rhs.as_array()[14]),
self.as_array()[15].wrapping_sub(rhs.as_array()[15]),
])
Avx2
- PSUBB xmm, xmm
impl SubAssign<I8x16> for I8x16
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for I8x16
impl Eq for I8x16
impl Pod for I8x16
Auto Trait Implementations
impl RefUnwindSafe for I8x16
impl Send for I8x16
impl Sync for I8x16
impl Unpin for I8x16
impl UnwindSafe for I8x16
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T where T: AnyBitPattern
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.