pub struct I8x16(/* private fields */);
[i8; 16] as a vector.
Implementations
impl I8x16

pub const fn from_array(arr: [i8; 16]) -> Self
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
§Example
const MY_EXTREMELY_FUN_VALUE: I8x16 = I8x16::from_array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as i8, value);
}

impl I8x16

pub fn shuffle(&self, order: U8x16) -> I8x16
§Scalar Equivalent:
let mut arr = [0; 16];
for (lane_dst, (lane_src, order)) in
arr.chunks_exact_mut(16).zip(
self.as_array().chunks_exact(16)
.zip(order.as_array().chunks_exact(16))
)
{
for (dst, idx) in lane_dst.iter_mut().zip(order) {
let idx = *idx;
*dst = if (idx >> 7) == 1 {
0
} else {
lane_src[(idx as usize) % 16]
};
}
}
arr.into()

§Avx2
- PSHUFB xmm, xmm
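A usage sketch of the shuffle semantics above; the index values and expected output are illustrative, and this assumes U8x16 can be built from a [u8; 16] like the other vector types on this page.

let v = I8x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
// The indices reverse the lanes; 0x80 has its high bit set, so that output lane is zeroed.
let order = U8x16::from([15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0x80]);
let shuffled = v.shuffle(order);
assert_eq!(
    shuffled.as_array(),
    [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
);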
Trait Implementations
impl Add for I8x16

fn add(self, rhs: I8x16) -> I8x16
Perform a pairwise wrapping_add
§Scalar Equivalent:
I8x16::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
self.as_array()[8].wrapping_add(rhs.as_array()[8]),
self.as_array()[9].wrapping_add(rhs.as_array()[9]),
self.as_array()[10].wrapping_add(rhs.as_array()[10]),
self.as_array()[11].wrapping_add(rhs.as_array()[11]),
self.as_array()[12].wrapping_add(rhs.as_array()[12]),
self.as_array()[13].wrapping_add(rhs.as_array()[13]),
self.as_array()[14].wrapping_add(rhs.as_array()[14]),
self.as_array()[15].wrapping_add(rhs.as_array()[15]),
])

§AVX2 Intrinsics Used
- _mm_add_epi8
  - PADDB xmm, xmm

§Neon Intrinsics Used
- vaddq_s8
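A short sketch of the wrapping behavior (the values are chosen for illustration):

let a = I8x16::from([i8::MAX, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let b = I8x16::from([1; 16]);
let sum = a + b;
// Lane 0 overflows and wraps: i8::MAX + 1 == i8::MIN under wrapping_add.
assert_eq!(sum.as_array()[0], i8::MIN);
// The remaining lanes add normally.
assert_eq!(sum.as_array()[1], 2);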
impl AddAssign for I8x16

fn add_assign(&mut self, other: I8x16)

Performs the += operation.

impl BitAnd for I8x16

fn bitand(self, rhs: I8x16) -> I8x16
Perform a pairwise bitwise and
§Scalar Equivalent:
I8x16::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
self.as_array()[8] & rhs.as_array()[8],
self.as_array()[9] & rhs.as_array()[9],
self.as_array()[10] & rhs.as_array()[10],
self.as_array()[11] & rhs.as_array()[11],
self.as_array()[12] & rhs.as_array()[12],
self.as_array()[13] & rhs.as_array()[13],
self.as_array()[14] & rhs.as_array()[14],
self.as_array()[15] & rhs.as_array()[15],
])

§AVX2 Intrinsics Used
- _mm_and_si128
  - PAND xmm, xmm

§Neon Intrinsics Used
- vandq_s8
impl BitAndAssign for I8x16

fn bitand_assign(&mut self, other: I8x16)

Performs the &= operation.

impl BitOr for I8x16

fn bitor(self, rhs: I8x16) -> I8x16
Perform a pairwise bitwise or
§Scalar Equivalent:
I8x16::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
self.as_array()[8] | rhs.as_array()[8],
self.as_array()[9] | rhs.as_array()[9],
self.as_array()[10] | rhs.as_array()[10],
self.as_array()[11] | rhs.as_array()[11],
self.as_array()[12] | rhs.as_array()[12],
self.as_array()[13] | rhs.as_array()[13],
self.as_array()[14] | rhs.as_array()[14],
self.as_array()[15] | rhs.as_array()[15],
])

§AVX2 Intrinsics Used
- _mm_or_si128
  - POR xmm, xmm

§Neon Intrinsics Used
- vorrq_s8
impl BitOrAssign for I8x16

fn bitor_assign(&mut self, other: I8x16)

Performs the |= operation.

impl BitXor for I8x16

fn bitxor(self, rhs: I8x16) -> I8x16
Perform a pairwise bitwise xor
§Scalar Equivalent:
I8x16::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
self.as_array()[8] ^ rhs.as_array()[8],
self.as_array()[9] ^ rhs.as_array()[9],
self.as_array()[10] ^ rhs.as_array()[10],
self.as_array()[11] ^ rhs.as_array()[11],
self.as_array()[12] ^ rhs.as_array()[12],
self.as_array()[13] ^ rhs.as_array()[13],
self.as_array()[14] ^ rhs.as_array()[14],
self.as_array()[15] ^ rhs.as_array()[15],
])

§AVX2 Intrinsics Used
- _mm_xor_si128
  - PXOR xmm, xmm

§Neon Intrinsics Used
- veorq_s8
impl BitXorAssign for I8x16

fn bitxor_assign(&mut self, other: I8x16)

Performs the ^= operation.

impl ConditionallySelectable for I8x16

fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
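A brief sketch of constant-time selection, following the subtle crate's documented semantics for Choice; the values are illustrative, and Choice and ConditionallySelectable are assumed to be in scope:

let a = I8x16::from([1; 16]);
let b = I8x16::from([2; 16]);
// Choice::from(0u8) selects `a`, Choice::from(1u8) selects `b`, without branching on the secret bit.
assert_eq!(I8x16::conditional_select(&a, &b, Choice::from(0u8)).as_array(), [1; 16]);
assert_eq!(I8x16::conditional_select(&a, &b, Choice::from(1u8)).as_array(), [2; 16]);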
fn conditional_assign(&mut self, other: &Self, choice: Choice)

fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)

Conditionally swap self and other if choice == 1; otherwise, reassign both unto themselves.

impl ConstantTimeEq for I8x16
impl<'de> Deserialize<'de> for I8x16

fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,

Deserialize this value from the given Serde deserializer.
impl Distribution<I8x16> for Standard

impl ExtendingCast<I8x16> for I16x16

fn extending_cast_from(vector: I8x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
i16::from(vector.as_array()[8]),
i16::from(vector.as_array()[9]),
i16::from(vector.as_array()[10]),
i16::from(vector.as_array()[11]),
i16::from(vector.as_array()[12]),
i16::from(vector.as_array()[13]),
i16::from(vector.as_array()[14]),
i16::from(vector.as_array()[15]),
])

§Avx2
- VPMOVSXBW ymm, xmm
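A sketch showing that this cast sign-extends each lane (the values are illustrative):

let v = I8x16::from([-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16]);
let wide = I16x16::extending_cast_from(v);
// Each i8 lane widens to i16 with its sign preserved.
assert_eq!(wide.as_array()[0], -1i16);
assert_eq!(wide.as_array()[15], 16i16);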
impl ExtendingCast<I8x16> for I16x8

fn extending_cast_from(vector: I8x16) -> I16x8
§Scalar Equivalent:
I16x8::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
])

§Avx2
- PMOVSXBW xmm, xmm
impl ExtendingCast<I8x16> for I32x4

impl ExtendingCast<I8x16> for I32x8

fn extending_cast_from(vector: I8x16) -> I32x8
§Scalar Equivalent:
I32x8::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
i32::from(vector.as_array()[4]),
i32::from(vector.as_array()[5]),
i32::from(vector.as_array()[6]),
i32::from(vector.as_array()[7]),
])

§Avx2
- VPMOVSXBD ymm, xmm
impl ExtendingCast<I8x16> for I64x2

impl ExtendingCast<I8x16> for I64x4

impl From<I8x16> for I16x16

fn from(vector: I8x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
i16::from(vector.as_array()[8]),
i16::from(vector.as_array()[9]),
i16::from(vector.as_array()[10]),
i16::from(vector.as_array()[11]),
i16::from(vector.as_array()[12]),
i16::from(vector.as_array()[13]),
i16::from(vector.as_array()[14]),
i16::from(vector.as_array()[15]),
])

§Avx2
- VPMOVSXBW ymm, xmm
impl From<I8x16> for I8x32

fn from(vector: I8x16) -> I8x32
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
§Scalar Equivalent:
let mut out = [0; 32];
out[0..16].copy_from_slice(&vector.as_array());
I8x32::from(out)

§Avx2
impl Shl<u64> for I8x16

fn shl(self, amount: u64) -> I8x16
§Scalar Equivalent:
if amount >= 8 {
I8x16::ZERO
} else {
I8x16::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
self.as_array()[8] << amount,
self.as_array()[9] << amount,
self.as_array()[10] << amount,
self.as_array()[11] << amount,
self.as_array()[12] << amount,
self.as_array()[13] << amount,
self.as_array()[14] << amount,
self.as_array()[15] << amount,
])
}

§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
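A sketch of the shift behavior, including the all-zero result for shift amounts of 8 or more (the values are illustrative):

let v = I8x16::from([1; 16]);
// Each lane is shifted left by the same amount: 1 << 3 == 8.
assert_eq!((v << 3u64).as_array(), [8; 16]);
// Shifting by the full lane width (or more) yields zero in every lane.
assert_eq!((v << 8u64).as_array(), [0; 16]);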
impl ShlAssign<u64> for I8x16

fn shl_assign(&mut self, amount: u64)

Performs the <<= operation.

impl ShlAssign for I8x16

fn shl_assign(&mut self, amount: I8x16)

Performs the <<= operation.

impl Shr<u64> for I8x16

fn shr(self, amount: u64) -> I8x16
§Scalar Equivalent:
if amount >= 8 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I8x16::from(out)
} else {
I8x16::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
self.as_array()[8] >> amount,
self.as_array()[9] >> amount,
self.as_array()[10] >> amount,
self.as_array()[11] >> amount,
self.as_array()[12] >> amount,
self.as_array()[13] >> amount,
self.as_array()[14] >> amount,
self.as_array()[15] >> amount,
])
}

§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
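A sketch of the arithmetic (sign-preserving) shift (the values are illustrative):

let v = I8x16::from([-8; 16]);
// The sign bit is replicated, so -8 >> 1 == -4 in every lane.
assert_eq!((v >> 1u64).as_array(), [-4; 16]);
// Shifting by 8 or more fills negative lanes with -1 (and non-negative lanes with 0).
assert_eq!((v >> 8u64).as_array(), [-1; 16]);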
impl Shr for I8x16

fn shr(self, amount: I8x16) -> I8x16
§Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if (0..8).contains(&amm) {
*x >> amm
} else if *x < 0 {
-1
} else {
0
};
}
I8x16::from(out)

§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl ShrAssign<u64> for I8x16

fn shr_assign(&mut self, amount: u64)

Performs the >>= operation.

impl ShrAssign for I8x16

fn shr_assign(&mut self, amount: I8x16)

Performs the >>= operation.

impl SimdBase for I8x16

fn set_lo(scalar: i8) -> I8x16

fn broadcast_lo(vector: I8x16) -> I8x16

fn cmp_eq(&self, other: I8x16) -> I8x16
§Scalar Equivalent:
I8x16::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] == other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] == other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] == other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] == other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] == other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] == other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] == other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] == other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] == other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] == other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] == other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] == other.as_array()[15] { -1 } else { 0 },
])

§Avx2
- PCMPEQB xmm, xmm
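A sketch of the lane-wise mask this produces (the values are illustrative):

let a = I8x16::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let b = I8x16::from([1; 16]);
let mask = a.cmp_eq(b);
// Equal lanes become -1 (all bits set); unequal lanes become 0.
assert_eq!(mask.as_array()[0], -1);
assert_eq!(mask.as_array()[1], 0);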
fn and_not(&self, other: I8x16) -> I8x16
§Scalar Equivalent:
I8x16::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
self.as_array()[8] & (!other.as_array()[8]),
self.as_array()[9] & (!other.as_array()[9]),
self.as_array()[10] & (!other.as_array()[10]),
self.as_array()[11] & (!other.as_array()[11]),
self.as_array()[12] & (!other.as_array()[12]),
self.as_array()[13] & (!other.as_array()[13]),
self.as_array()[14] & (!other.as_array()[14]),
self.as_array()[15] & (!other.as_array()[15]),
])

§Avx2
- PANDN xmm, xmm
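A sketch of the bit-clearing use of and_not (the values are illustrative):

let a = I8x16::from([0b1111; 16]);
let b = I8x16::from([0b0101; 16]);
// Keeps the bits of `self` that are NOT set in `other`: 0b1111 & !0b0101 == 0b1010.
assert_eq!(a.and_not(b).as_array(), [0b1010; 16]);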
fn cmp_gt(&self, other: I8x16) -> I8x16
§Scalar Equivalent:
I8x16::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] > other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] > other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] > other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] > other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] > other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] > other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] > other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] > other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] > other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] > other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] > other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] > other.as_array()[15] { -1 } else { 0 },
])

§Avx2
- PCMPGTB xmm, xmm
fn shift_left<const BITS: usize>(&self) -> I8x16

fn shift_right<const BITS: usize>(&self) -> I8x16

fn unpack_lo(&self, other: I8x16) -> I8x16
§Scalar Equivalent:
I8x16::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
self.as_array()[4],
other.as_array()[4],
self.as_array()[5],
other.as_array()[5],
self.as_array()[6],
other.as_array()[6],
self.as_array()[7],
other.as_array()[7],
])

§Avx2
- PUNPCKLBW xmm, xmm
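A sketch of the interleaving (the values are illustrative):

let a = I8x16::from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let b = I8x16::from([16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]);
// The low eight lanes of `self` and `other` are interleaved, starting with `self`.
assert_eq!(
    a.unpack_lo(b).as_array(),
    [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23],
);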
fn unpack_hi(&self, other: I8x16) -> I8x16
§Scalar Equivalent:
I8x16::from([
// Lane# 0
self.as_array()[8],
other.as_array()[8],
self.as_array()[9],
other.as_array()[9],
self.as_array()[10],
other.as_array()[10],
self.as_array()[11],
other.as_array()[11],
self.as_array()[12],
other.as_array()[12],
self.as_array()[13],
other.as_array()[13],
self.as_array()[14],
other.as_array()[14],
self.as_array()[15],
other.as_array()[15],
])

§Avx2
- PUNPCKHBW xmm, xmm
fn max(&self, other: I8x16) -> I8x16
§Scalar Equivalent:
I8x16::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
self.as_array()[8].max(other.as_array()[8]),
self.as_array()[9].max(other.as_array()[9]),
self.as_array()[10].max(other.as_array()[10]),
self.as_array()[11].max(other.as_array()[11]),
self.as_array()[12].max(other.as_array()[12]),
self.as_array()[13].max(other.as_array()[13]),
self.as_array()[14].max(other.as_array()[14]),
self.as_array()[15].max(other.as_array()[15]),
])

§Avx2
- PMAXSB xmm, xmm
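A sketch of the lane-wise maximum (the values are illustrative; min, below, is the mirror image):

let a = I8x16::from([-1; 16]);
let b = I8x16::from([3; 16]);
// Each output lane is the larger of the two corresponding input lanes.
assert_eq!(a.max(b).as_array(), [3; 16]);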
fn min(&self, other: I8x16) -> I8x16
§Scalar Equivalent:
I8x16::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
self.as_array()[8].min(other.as_array()[8]),
self.as_array()[9].min(other.as_array()[9]),
self.as_array()[10].min(other.as_array()[10]),
self.as_array()[11].min(other.as_array()[11]),
self.as_array()[12].min(other.as_array()[12]),
self.as_array()[13].min(other.as_array()[13]),
self.as_array()[14].min(other.as_array()[14]),
self.as_array()[15].min(other.as_array()[15]),
])

§Avx2
- PMINSB xmm, xmm
type BroadcastLoInput = I8x16

A vector of [Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())].

impl SimdBase8 for I8x16

fn shift_bytes_left<const AMOUNT: usize>(&self) -> I8x16

fn shift_bytes_right<const AMOUNT: usize>(&self) -> I8x16
impl SimdSaturatingArithmetic for I8x16

fn saturating_add(&self, other: I8x16) -> I8x16

impl Sub for I8x16

fn sub(self, rhs: I8x16) -> I8x16
Perform a pairwise wrapping_sub
§Scalar Equivalent:
I8x16::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
self.as_array()[8].wrapping_sub(rhs.as_array()[8]),
self.as_array()[9].wrapping_sub(rhs.as_array()[9]),
self.as_array()[10].wrapping_sub(rhs.as_array()[10]),
self.as_array()[11].wrapping_sub(rhs.as_array()[11]),
self.as_array()[12].wrapping_sub(rhs.as_array()[12]),
self.as_array()[13].wrapping_sub(rhs.as_array()[13]),
self.as_array()[14].wrapping_sub(rhs.as_array()[14]),
self.as_array()[15].wrapping_sub(rhs.as_array()[15]),
])

§AVX2 Intrinsics Used
- _mm_sub_epi8
  - PSUBB xmm, xmm

§Neon Intrinsics Used
- vsubq_s8
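A short sketch of the wrapping behavior (the values are chosen for illustration):

let a = I8x16::from([i8::MIN; 16]);
let b = I8x16::from([1; 16]);
// Wrapping subtraction: i8::MIN - 1 wraps around to i8::MAX.
assert_eq!((a - b).as_array(), [i8::MAX; 16]);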
impl SubAssign for I8x16

fn sub_assign(&mut self, other: I8x16)

Performs the -= operation.

impl Copy for I8x16
impl Eq for I8x16
impl Pod for I8x16
Auto Trait Implementations
impl Freeze for I8x16
impl RefUnwindSafe for I8x16
impl Send for I8x16
impl Sync for I8x16
impl Unpin for I8x16
impl UnwindSafe for I8x16
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,

type Bits = T

Self must have the same layout as the specified Bits, except for the possible invalid bit patterns being checked during is_valid_bit_pattern.

fn is_valid_bit_pattern(_bits: &T) -> bool

If this function returns true, then it must be valid to reinterpret bits as &Self.