pub struct I16x16(/* private fields */);
[i16; 16] as a vector.
Implementations§
impl I16x16
pub const fn from_array(arr: [i16; 16]) -> Self
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
§Example
const MY_EXTREMELY_FUN_VALUE: I16x16 = I16x16::from_array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as i16, value);
}
Trait Implementations§
impl Add for I16x16
fn add(self, rhs: I16x16) -> I16x16
Perform a pairwise wrapping_add
§Scalar Equivalent
I16x16::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
self.as_array()[8].wrapping_add(rhs.as_array()[8]),
self.as_array()[9].wrapping_add(rhs.as_array()[9]),
self.as_array()[10].wrapping_add(rhs.as_array()[10]),
self.as_array()[11].wrapping_add(rhs.as_array()[11]),
self.as_array()[12].wrapping_add(rhs.as_array()[12]),
self.as_array()[13].wrapping_add(rhs.as_array()[13]),
self.as_array()[14].wrapping_add(rhs.as_array()[14]),
self.as_array()[15].wrapping_add(rhs.as_array()[15]),
])
§AVX2 Intrinsics Used
- _mm256_add_epi16: VPADDW ymm, ymm, ymm
§Neon Intrinsics Used
- vaddq_s16
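§Example
A minimal usage sketch (not part of the crate's documentation), using only from_array and as_array from above; lane 0 demonstrates the wrapping behaviour at i16::MAX:
let a = I16x16::from_array([i16::MAX, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let b = I16x16::from_array([1, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let sum = a + b;
assert_eq!(sum.as_array()[0], i16::MIN); // i16::MAX wrapping_add 1 wraps around
assert_eq!(sum.as_array()[1], 30);       // in-range lanes add normally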
impl AddAssign for I16x16
fn add_assign(&mut self, other: I16x16)
Performs the += operation.
impl BitAnd for I16x16
fn bitand(self, rhs: I16x16) -> I16x16
Perform a pairwise bitwise and
§Scalar Equivalent
I16x16::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
self.as_array()[8] & rhs.as_array()[8],
self.as_array()[9] & rhs.as_array()[9],
self.as_array()[10] & rhs.as_array()[10],
self.as_array()[11] & rhs.as_array()[11],
self.as_array()[12] & rhs.as_array()[12],
self.as_array()[13] & rhs.as_array()[13],
self.as_array()[14] & rhs.as_array()[14],
self.as_array()[15] & rhs.as_array()[15],
])
§AVX2 Intrinsics Used
- _mm256_and_si256: VPAND ymm, ymm, ymm
§Neon Intrinsics Used
- vandq_s16
impl BitAndAssign for I16x16
fn bitand_assign(&mut self, other: I16x16)
Performs the &= operation.
impl BitOr for I16x16
fn bitor(self, rhs: I16x16) -> I16x16
Perform a pairwise bitwise or
§Scalar Equivalent
I16x16::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
self.as_array()[8] | rhs.as_array()[8],
self.as_array()[9] | rhs.as_array()[9],
self.as_array()[10] | rhs.as_array()[10],
self.as_array()[11] | rhs.as_array()[11],
self.as_array()[12] | rhs.as_array()[12],
self.as_array()[13] | rhs.as_array()[13],
self.as_array()[14] | rhs.as_array()[14],
self.as_array()[15] | rhs.as_array()[15],
])
§AVX2 Intrinsics Used
- _mm256_or_si256: VPOR ymm, ymm, ymm
§Neon Intrinsics Used
- vorrq_s16
impl BitOrAssign for I16x16
fn bitor_assign(&mut self, other: I16x16)
Performs the |= operation.
impl BitXor for I16x16
fn bitxor(self, rhs: I16x16) -> I16x16
Perform a pairwise bitwise xor
§Scalar Equivalent
I16x16::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
self.as_array()[8] ^ rhs.as_array()[8],
self.as_array()[9] ^ rhs.as_array()[9],
self.as_array()[10] ^ rhs.as_array()[10],
self.as_array()[11] ^ rhs.as_array()[11],
self.as_array()[12] ^ rhs.as_array()[12],
self.as_array()[13] ^ rhs.as_array()[13],
self.as_array()[14] ^ rhs.as_array()[14],
self.as_array()[15] ^ rhs.as_array()[15],
])
§AVX2 Intrinsics Used
- _mm256_xor_si256: VPXOR ymm, ymm, ymm
§Neon Intrinsics Used
- veorq_s16
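§Example
A small sketch (not part of the crate's documentation) combining xor with SimdBase::is_zero, using only constructors shown above:
let a = I16x16::from_array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
// x ^ x is zero in every element.
assert!((a ^ a).is_zero());
// Xor with an all-ones mask flips every bit, i.e. computes !x element-wise.
let ones = I16x16::from_array([-1; 16]);
assert_eq!((a ^ ones).as_array()[0], !1i16);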
impl BitXorAssign for I16x16
fn bitxor_assign(&mut self, other: I16x16)
Performs the ^= operation.
impl ConditionallySelectable for I16x16
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap a and b if choice == 1; otherwise, leave both unchanged.
impl ConstantTimeEq for I16x16
impl<'de> Deserialize<'de> for I16x16
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer.
impl Distribution<I16x16> for Standard
impl ExtendingCast<I8x16> for I16x16
fn extending_cast_from(vector: I8x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
i16::from(vector.as_array()[8]),
i16::from(vector.as_array()[9]),
i16::from(vector.as_array()[10]),
i16::from(vector.as_array()[11]),
i16::from(vector.as_array()[12]),
i16::from(vector.as_array()[13]),
i16::from(vector.as_array()[14]),
i16::from(vector.as_array()[15]),
])
§Avx2
- VPMOVSXBW ymm, xmm
impl From<I16x8> for I16x16
fn from(vector: I16x8) -> I16x16
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
§Scalar Equivalent:
let mut out = [0; 16];
out[0..8].copy_from_slice(&vector.as_array());
I16x16::from(out)
§Avx2
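§Example
A sketch of the widening conversion (not part of the crate's documentation; it assumes I16x8 can likewise be built from an [i16; 8]):
let narrow = I16x8::from([1, 2, 3, 4, 5, 6, 7, 8]); // assumed constructor for the narrower type
let wide = I16x16::from(narrow);
// The low 8 elements are copied and the upper 8 are zeroed, per the note above.
assert_eq!(wide.as_array(), [1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]);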
impl From<I8x16> for I16x16
fn from(vector: I8x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
i16::from(vector.as_array()[8]),
i16::from(vector.as_array()[9]),
i16::from(vector.as_array()[10]),
i16::from(vector.as_array()[11]),
i16::from(vector.as_array()[12]),
i16::from(vector.as_array()[13]),
i16::from(vector.as_array()[14]),
i16::from(vector.as_array()[15]),
])
§Avx2
- VPMOVSXBW ymm, xmm
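§Example
Because the conversion is a signed extension, negative bytes keep their value. A sketch (not part of the crate's documentation; it assumes I8x16 can be built from an [i8; 16]):
let bytes = I8x16::from([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]); // assumed constructor
let widened = I16x16::from(bytes);
assert_eq!(widened.as_array()[0], -1i16); // sign-extended, not 255
assert_eq!(widened.as_array()[15], 14i16);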
impl Shl<u64> for I16x16
fn shl(self, amount: u64) -> I16x16
§Scalar Equivalent:
if amount >= 16 {
I16x16::ZERO
} else {
I16x16::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
self.as_array()[8] << amount,
self.as_array()[9] << amount,
self.as_array()[10] << amount,
self.as_array()[11] << amount,
self.as_array()[12] << amount,
self.as_array()[13] << amount,
self.as_array()[14] << amount,
self.as_array()[15] << amount,
])
}
§Avx2
- VPSLLW ymm, ymm, xmm
- Instruction sequence.
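§Example
A sketch (not part of the crate's documentation) showing that an in-range shift scales each element while a shift of 16 or more collapses the vector to zero:
let v = I16x16::from_array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
assert_eq!((v << 3u64).as_array()[0], 8); // 1 * 2^3
assert!((v << 16u64).is_zero());          // amount >= 16 yields I16x16::ZERO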
impl ShlAssign<u64> for I16x16
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl ShlAssign for I16x16
fn shl_assign(&mut self, amount: I16x16)
Performs the <<= operation.
impl Shr<u64> for I16x16
fn shr(self, amount: u64) -> I16x16
§Scalar Equivalent:
if amount >= 16 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I16x16::from(out)
} else {
I16x16::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
self.as_array()[8] >> amount,
self.as_array()[9] >> amount,
self.as_array()[10] >> amount,
self.as_array()[11] >> amount,
self.as_array()[12] >> amount,
self.as_array()[13] >> amount,
self.as_array()[14] >> amount,
self.as_array()[15] >> amount,
])
}
§Avx2
- VPSRAW ymm, ymm, xmm
- Instruction sequence.
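§Example
The shift is arithmetic, so the sign bit is replicated, and an over-large amount saturates negative elements to -1 and non-negative elements to 0. A sketch (not part of the crate's documentation):
let v = I16x16::from_array([-8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let shifted = v >> 2u64;
assert_eq!(shifted.as_array()[0], -2); // -8 >> 2, sign preserved
assert_eq!(shifted.as_array()[1], 2);
let saturated = v >> 16u64;
assert_eq!(saturated.as_array()[0], -1); // negative element
assert_eq!(saturated.as_array()[1], 0);  // non-negative element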
impl Shr for I16x16
fn shr(self, amount: I16x16) -> I16x16
§Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if (0..16).contains(&amm) {
*x >> amm
} else if *x < 0 {
-1
} else {
0
};
}
I16x16::from(out)
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
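§Example
Unlike Shr<u64>, every element gets its own shift amount, with out-of-range amounts handled per element. A sketch (not part of the crate's documentation):
let v = I16x16::from_array([16, -16, 7, -7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let amounts = I16x16::from_array([1, 2, 100, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let out = v >> amounts;
assert_eq!(out.as_array()[0], 8);  // 16 >> 1
assert_eq!(out.as_array()[1], -4); // -16 >> 2, arithmetic
assert_eq!(out.as_array()[2], 0);  // amount out of range, non-negative element
assert_eq!(out.as_array()[3], -1); // amount out of range, negative element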
impl ShrAssign<u64> for I16x16
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl ShrAssign for I16x16
fn shr_assign(&mut self, amount: I16x16)
Performs the >>= operation.
impl SimdBase for I16x16
fn is_zero(&self) -> bool
fn set_lo(scalar: i16) -> I16x16
fn broadcast_lo(vector: I16x8) -> I16x16
fn cmp_eq(&self, other: I16x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] == other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] == other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] == other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] == other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] == other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] == other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] == other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] == other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] == other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] == other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] == other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] == other.as_array()[15] { -1 } else { 0 },
])
§Avx2
- VPCMPEQW ymm, ymm, ymm
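§Example
The result is a mask vector: -1 (all bits set) where the elements are equal, 0 elsewhere, so it composes directly with the bitwise operators above. A sketch (not part of the crate's documentation):
let a = I16x16::from_array([1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let b = I16x16::from_array([1, 9, 3, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let mask = a.cmp_eq(b);
assert_eq!(mask.as_array()[0], -1); // equal
assert_eq!(mask.as_array()[1], 0);  // not equal
// Keep only the elements of `a` that matched.
let matched = a & mask;
assert_eq!(matched.as_array()[0], 1);
assert_eq!(matched.as_array()[1], 0);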
fn and_not(&self, other: I16x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
self.as_array()[8] & (!other.as_array()[8]),
self.as_array()[9] & (!other.as_array()[9]),
self.as_array()[10] & (!other.as_array()[10]),
self.as_array()[11] & (!other.as_array()[11]),
self.as_array()[12] & (!other.as_array()[12]),
self.as_array()[13] & (!other.as_array()[13]),
self.as_array()[14] & (!other.as_array()[14]),
self.as_array()[15] & (!other.as_array()[15]),
])
§Avx2
- VPANDN ymm, ymm, ymm
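§Example
Note the operand order: the result is self AND (NOT other), i.e. every bit that is set in other is cleared from self. A sketch (not part of the crate's documentation):
let value = I16x16::from_array([0b1111; 16]);
let mask = I16x16::from_array([0b0101; 16]);
let out = value.and_not(mask);
assert_eq!(out.as_array()[0], 0b1010); // bits of `mask` removed from `value`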
fn cmp_gt(&self, other: I16x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] > other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] > other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] > other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] > other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] > other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] > other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] > other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] > other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] > other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] > other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] > other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] > other.as_array()[15] { -1 } else { 0 },
])
§Avx2
- VPCMPGTW ymm, ymm, ymm
fn shift_left<const BITS: usize>(&self) -> I16x16
fn shift_right<const BITS: usize>(&self) -> I16x16
fn unpack_lo(&self, other: I16x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
// Lane# 1
self.as_array()[8],
other.as_array()[8],
self.as_array()[9],
other.as_array()[9],
self.as_array()[10],
other.as_array()[10],
self.as_array()[11],
other.as_array()[11],
])
§Avx2
- VPUNPCKLWD ymm, ymm, ymm
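§Example
The interleave operates within each 128-bit lane independently, so elements 4..8 and 12..16 of the inputs never appear in the result. A sketch (not part of the crate's documentation):
let a = I16x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let b = I16x16::from_array([100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115]);
// Low halves of each lane interleaved: a0, b0, a1, b1, ...
assert_eq!(
    a.unpack_lo(b).as_array(),
    [0, 100, 1, 101, 2, 102, 3, 103, 8, 108, 9, 109, 10, 110, 11, 111]
);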
fn unpack_hi(&self, other: I16x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
// Lane# 0
self.as_array()[4],
other.as_array()[4],
self.as_array()[5],
other.as_array()[5],
self.as_array()[6],
other.as_array()[6],
self.as_array()[7],
other.as_array()[7],
// Lane# 1
self.as_array()[12],
other.as_array()[12],
self.as_array()[13],
other.as_array()[13],
self.as_array()[14],
other.as_array()[14],
self.as_array()[15],
other.as_array()[15],
])
§Avx2
- VPUNPCKHWD ymm, ymm, ymm
fn max(&self, other: I16x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
self.as_array()[8].max(other.as_array()[8]),
self.as_array()[9].max(other.as_array()[9]),
self.as_array()[10].max(other.as_array()[10]),
self.as_array()[11].max(other.as_array()[11]),
self.as_array()[12].max(other.as_array()[12]),
self.as_array()[13].max(other.as_array()[13]),
self.as_array()[14].max(other.as_array()[14]),
self.as_array()[15].max(other.as_array()[15]),
])
§Avx2
- VPMAXSW ymm, ymm, ymm
fn min(&self, other: I16x16) -> I16x16
§Scalar Equivalent:
I16x16::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
self.as_array()[8].min(other.as_array()[8]),
self.as_array()[9].min(other.as_array()[9]),
self.as_array()[10].min(other.as_array()[10]),
self.as_array()[11].min(other.as_array()[11]),
self.as_array()[12].min(other.as_array()[12]),
self.as_array()[13].min(other.as_array()[13]),
self.as_array()[14].min(other.as_array()[14]),
self.as_array()[15].min(other.as_array()[15]),
])
§Avx2
- VPMINSW ymm, ymm, ymm
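§Example
min and max compose into an element-wise clamp. A sketch (not part of the crate's documentation):
let v = I16x16::from_array([-50, 5, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let lo = I16x16::from_array([-10; 16]);
let hi = I16x16::from_array([10; 16]);
// Clamp every element of `v` into [-10, 10].
let clamped = v.max(lo).min(hi);
assert_eq!(clamped.as_array()[0], -10);
assert_eq!(clamped.as_array()[1], 5);
assert_eq!(clamped.as_array()[2], 10);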
type BroadcastLoInput = I16x8
A vector of [Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())]
impl SimdBase16 for I16x16
fn shuffle_lo<const I3: usize, const I2: usize, const I1: usize, const I0: usize>(&self) -> I16x16
§Scalar Equivalent:
I16x16::from([
// 128-bit Lane #0
self.as_array()[I0 + 0 * 8],
self.as_array()[I1 + 0 * 8],
self.as_array()[I2 + 0 * 8],
self.as_array()[I3 + 0 * 8],
self.as_array()[4 + 0 * 8],
self.as_array()[5 + 0 * 8],
self.as_array()[6 + 0 * 8],
self.as_array()[7 + 0 * 8],
// 128-bit Lane #1
self.as_array()[I0 + 1 * 8],
self.as_array()[I1 + 1 * 8],
self.as_array()[I2 + 1 * 8],
self.as_array()[I3 + 1 * 8],
self.as_array()[4 + 1 * 8],
self.as_array()[5 + 1 * 8],
self.as_array()[6 + 1 * 8],
self.as_array()[7 + 1 * 8],
])
§Avx2
- VPSHUFLW ymm, ymm, imm8
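§Example
The const parameters are declared in the order I3, I2, I1, I0, so the turbofish lists the source indices for output positions 3 down to 0 within the low half of each 128-bit lane. A sketch (not part of the crate's documentation) that reverses the low four elements of each lane:
let v = I16x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
// I3 = 0, I2 = 1, I1 = 2, I0 = 3: output position 0 takes input index 3, and so on.
let shuffled = v.shuffle_lo::<0, 1, 2, 3>();
assert_eq!(
    shuffled.as_array(),
    [3, 2, 1, 0, 4, 5, 6, 7, 11, 10, 9, 8, 12, 13, 14, 15]
);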
fn shuffle_hi<const I3: usize, const I2: usize, const I1: usize, const I0: usize>(&self) -> I16x16
§Scalar Equivalent:
I16x16::from([
// 128-bit Lane #0
self.as_array()[0 + 0 * 8],
self.as_array()[1 + 0 * 8],
self.as_array()[2 + 0 * 8],
self.as_array()[3 + 0 * 8],
self.as_array()[I0 + 4 + 0 * 8],
self.as_array()[I1 + 4 + 0 * 8],
self.as_array()[I2 + 4 + 0 * 8],
self.as_array()[I3 + 4 + 0 * 8],
// 128-bit Lane #1
self.as_array()[0 + 1 * 8],
self.as_array()[1 + 1 * 8],
self.as_array()[2 + 1 * 8],
self.as_array()[3 + 1 * 8],
self.as_array()[I0 + 4 + 1 * 8],
self.as_array()[I1 + 4 + 1 * 8],
self.as_array()[I2 + 4 + 1 * 8],
self.as_array()[I3 + 4 + 1 * 8],
])
§Avx2
- VPSHUFHW ymm, ymm, imm8
impl SimdSaturatingArithmetic for I16x16
fn saturating_add(&self, other: I16x16) -> I16x16
impl Sub for I16x16
fn sub(self, rhs: I16x16) -> I16x16
Perform a pairwise wrapping_sub
§Scalar Equivalent
I16x16::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
self.as_array()[8].wrapping_sub(rhs.as_array()[8]),
self.as_array()[9].wrapping_sub(rhs.as_array()[9]),
self.as_array()[10].wrapping_sub(rhs.as_array()[10]),
self.as_array()[11].wrapping_sub(rhs.as_array()[11]),
self.as_array()[12].wrapping_sub(rhs.as_array()[12]),
self.as_array()[13].wrapping_sub(rhs.as_array()[13]),
self.as_array()[14].wrapping_sub(rhs.as_array()[14]),
self.as_array()[15].wrapping_sub(rhs.as_array()[15]),
])
§AVX2 Intrinsics Used
- _mm256_sub_epi16: VPSUBW ymm, ymm, ymm
§Neon Intrinsics Used
- vsubq_s16
impl SubAssign for I16x16
fn sub_assign(&mut self, other: I16x16)
Performs the -= operation.
impl Copy for I16x16
impl Eq for I16x16
impl Pod for I16x16
Auto Trait Implementations§
impl Freeze for I16x16
impl RefUnwindSafe for I16x16
impl Send for I16x16
impl Sync for I16x16
impl Unpin for I16x16
impl UnwindSafe for I16x16
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits as &Self.