pub struct U16x8(/* private fields */);
[u16; 8] as a vector.
Implementations§
impl U16x8
pub const fn from_array(arr: [u16; 8]) -> Self
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
§Example
const MY_EXTREMELY_FUN_VALUE: U16x8 = U16x8::from_array([0, 1, 2, 3, 4, 5, 6, 7]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as u16, value);
}
Trait Implementations§
impl Add for U16x8
fn add(self, rhs: U16x8) -> U16x8
Perform a pairwise wrapping_add
§Scalar Equivalent
U16x8::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
])
§AVX2 Intrinsics Used
- _mm_add_epi16: PADDW xmm, xmm
§Neon Intrinsics Used
- vaddq_u16
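For illustration, a small usage sketch (not part of the original docs) of the wrapping behavior described above; it assumes only from_array, as_array, and this Add impl:
let a = U16x8::from_array([u16::MAX, 1, 2, 3, 4, 5, 6, 7]);
let b = U16x8::from_array([1; 8]);
// Lane 0 wraps around instead of panicking or saturating.
assert_eq!((a + b).as_array()[0], 0);
assert_eq!((a + b).as_array()[1], 2);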
impl AddAssign for U16x8
fn add_assign(&mut self, other: U16x8)
Performs the += operation.
impl BitAnd for U16x8
fn bitand(self, rhs: U16x8) -> U16x8
Perform a pairwise bitwise and
§Scalar Equivalent
U16x8::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
])
§AVX2 Intrinsics Used
- _mm_and_si128: PAND xmm, xmm
§Neon Intrinsics Used
- vandq_u16
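As a quick illustration (a sketch, not from the original docs), the pairwise and is commonly used to mask lanes; only from_array, as_array, and this BitAnd impl are assumed:
let v = U16x8::from_array([0xFF00, 0x1234, 0xFFFF, 0, 1, 2, 3, 4]);
let mask = U16x8::from_array([0x00FF; 8]);
// Keep only the low byte of every lane.
assert_eq!((v & mask).as_array()[0], 0x0000);
assert_eq!((v & mask).as_array()[1], 0x0034);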
impl BitAndAssign for U16x8
fn bitand_assign(&mut self, other: U16x8)
Performs the &= operation.
impl BitOr for U16x8
fn bitor(self, rhs: U16x8) -> U16x8
Perform a pairwise bitwise or
§Scalar Equivalent
U16x8::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
])
§AVX2 Intrinsics Used
- _mm_or_si128: POR xmm, xmm
§Neon Intrinsics Used
- vorrq_u16
impl BitOrAssign for U16x8
fn bitor_assign(&mut self, other: U16x8)
Performs the |= operation.
impl BitXor for U16x8
fn bitxor(self, rhs: U16x8) -> U16x8
Perform a pairwise bitwise xor
§Scalar Equivalent
U16x8::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
])
§AVX2 Intrinsics Used
- _mm_xor_si128: PXOR xmm, xmm
§Neon Intrinsics Used
- veorq_u16
impl BitXorAssign for U16x8
fn bitxor_assign(&mut self, other: U16x8)
Performs the ^= operation.
impl ConditionallySelectable for U16x8
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap self and other if choice == 1; otherwise, reassign both unto themselves.
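A brief sketch of constant-time selection (not from the original docs), assuming Choice is the subtle crate's Choice type, that the trait is in scope, and that conditional_select returns a when choice is 0 and b when choice is 1:
use subtle::{Choice, ConditionallySelectable};
let a = U16x8::from_array([0; 8]);
let b = U16x8::from_array([1; 8]);
// choice == 1, so b is selected without a data-dependent branch.
let picked = U16x8::conditional_select(&a, &b, Choice::from(1u8));
assert_eq!(picked.as_array()[0], 1);
assert_eq!(picked.as_array()[7], 1);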
impl ConstantTimeEq for U16x8
impl<'de> Deserialize<'de> for U16x8
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer.
impl Distribution<U16x8> for Standard
impl ExtendingCast<U16x8> for U32x4
impl ExtendingCast<U16x8> for U32x8
fn extending_cast_from(vector: U16x8) -> U32x8
§Scalar Equivalent:
U32x8::from([
u32::from(vector.as_array()[0]),
u32::from(vector.as_array()[1]),
u32::from(vector.as_array()[2]),
u32::from(vector.as_array()[3]),
u32::from(vector.as_array()[4]),
u32::from(vector.as_array()[5]),
u32::from(vector.as_array()[6]),
u32::from(vector.as_array()[7]),
])
§Avx2
- VPMOVZXWD ymm, xmm
impl ExtendingCast<U16x8> for U64x2
impl ExtendingCast<U16x8> for U64x4
impl ExtendingCast<U8x16> for U16x8
fn extending_cast_from(vector: U8x16) -> U16x8
§Scalar Equivalent:
U16x8::from([
u16::from(vector.as_array()[0]),
u16::from(vector.as_array()[1]),
u16::from(vector.as_array()[2]),
u16::from(vector.as_array()[3]),
u16::from(vector.as_array()[4]),
u16::from(vector.as_array()[5]),
u16::from(vector.as_array()[6]),
u16::from(vector.as_array()[7]),
])
§Avx2
- PMOVZXBW xmm, xmm
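A small sketch (not from the original docs) of the zero-extending cast above; it assumes U8x16 offers the same from_array constructor as U16x8 and that the ExtendingCast trait is in scope:
let bytes = U8x16::from_array([255, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
// Only the low eight u8 lanes are widened into the eight u16 lanes.
let widened = U16x8::extending_cast_from(bytes);
assert_eq!(widened.as_array()[0], 255u16);
assert_eq!(widened.as_array()[7], 7u16);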
impl From<U16x8> for U16x16
fn from(vector: U16x8) -> U16x16
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
§Scalar Equivalent:
let mut out = [0; 16];
out[0..8].copy_from_slice(&vector.as_array());
U16x16::from(out)
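As an illustration (a sketch, not from the original docs) of the zero-filled upper lanes, assuming U16x16 exposes the same as_array accessor as U16x8:
let narrow = U16x8::from_array([1, 2, 3, 4, 5, 6, 7, 8]);
let wide = U16x16::from(narrow);
// Lanes 0..8 are copied; lanes 8..16 are zeroed, not left undefined.
assert_eq!(wide.as_array()[7], 8);
assert_eq!(wide.as_array()[8], 0);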
impl From<U16x8> for U32x8
fn from(vector: U16x8) -> U32x8
§Scalar Equivalent:
U32x8::from([
u32::from(vector.as_array()[0]),
u32::from(vector.as_array()[1]),
u32::from(vector.as_array()[2]),
u32::from(vector.as_array()[3]),
u32::from(vector.as_array()[4]),
u32::from(vector.as_array()[5]),
u32::from(vector.as_array()[6]),
u32::from(vector.as_array()[7]),
])
§Avx2
- VPMOVZXWD ymm, xmm
impl Shl<u64> for U16x8
fn shl(self, amount: u64) -> U16x8
§Scalar Equivalent:
if amount >= 16 {
U16x8::ZERO
} else {
U16x8::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
])
}
§Avx2
- Instruction sequence.
- PSLLW xmm, xmm
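A brief sketch (not from the original docs) of the shift behavior spelled out above, using only from_array, as_array, and the U16x8::ZERO constant referenced in the scalar equivalent:
let v = U16x8::from_array([1, 2, 3, 4, 5, 6, 7, 8]);
// In-range shifts are applied lane-wise...
assert_eq!((v << 4u64).as_array()[0], 16);
// ...and shifting by 16 or more bits yields the all-zero vector.
assert_eq!((v << 16u64).as_array(), U16x8::ZERO.as_array());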
impl ShlAssign<u64> for U16x8
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl ShlAssign for U16x8
fn shl_assign(&mut self, amount: U16x8)
Performs the <<= operation.
impl Shr<u64> for U16x8
fn shr(self, amount: u64) -> U16x8
§Scalar Equivalent:
if amount >= 16 {
U16x8::ZERO
} else {
U16x8::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
])
}
§Avx2
- Instruction sequence.
- PSRLW xmm, xmm
impl ShrAssign<u64> for U16x8
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl ShrAssign for U16x8
fn shr_assign(&mut self, amount: U16x8)
Performs the >>= operation.
impl SimdBase for U16x8
fn set_lo(scalar: u16) -> U16x8
fn broadcast_lo(vector: U16x8) -> U16x8
fn cmp_eq(&self, other: U16x8) -> U16x8
§Scalar Equivalent:
U16x8::from([
if self.as_array()[0] == other.as_array()[0] { u16::MAX } else { 0 },
if self.as_array()[1] == other.as_array()[1] { u16::MAX } else { 0 },
if self.as_array()[2] == other.as_array()[2] { u16::MAX } else { 0 },
if self.as_array()[3] == other.as_array()[3] { u16::MAX } else { 0 },
if self.as_array()[4] == other.as_array()[4] { u16::MAX } else { 0 },
if self.as_array()[5] == other.as_array()[5] { u16::MAX } else { 0 },
if self.as_array()[6] == other.as_array()[6] { u16::MAX } else { 0 },
if self.as_array()[7] == other.as_array()[7] { u16::MAX } else { 0 },
])
§Avx2
- PCMPEQW xmm, xmm
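For illustration (a sketch, not from the original docs, with the SimdBase trait in scope), the comparison yields an all-ones or all-zero mask per lane, which is how it is typically combined with the bitwise operations documented above:
let a = U16x8::from_array([1, 2, 3, 4, 5, 6, 7, 8]);
let b = U16x8::from_array([1, 0, 3, 0, 5, 0, 7, 0]);
let mask = a.cmp_eq(b);
assert_eq!(mask.as_array()[0], u16::MAX); // equal lanes become all ones
assert_eq!(mask.as_array()[1], 0);        // unequal lanes become zero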
fn and_not(&self, other: U16x8) -> U16x8
§Scalar Equivalent:
U16x8::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
])
§Avx2
- PANDN xmm, xmm
fn cmp_gt(&self, other: U16x8) -> U16x8
§Scalar Equivalent:
U16x8::from([
if self.as_array()[0] > other.as_array()[0] { u16::MAX } else { 0 },
if self.as_array()[1] > other.as_array()[1] { u16::MAX } else { 0 },
if self.as_array()[2] > other.as_array()[2] { u16::MAX } else { 0 },
if self.as_array()[3] > other.as_array()[3] { u16::MAX } else { 0 },
if self.as_array()[4] > other.as_array()[4] { u16::MAX } else { 0 },
if self.as_array()[5] > other.as_array()[5] { u16::MAX } else { 0 },
if self.as_array()[6] > other.as_array()[6] { u16::MAX } else { 0 },
if self.as_array()[7] > other.as_array()[7] { u16::MAX } else { 0 },
])
§Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 15);
Self::from(I16x8::from(*self ^ sign_bit).cmp_gt(
I16x8::from(other ^ sign_bit)
))
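A scalar sketch (not from the original docs) of why the polyfill works: XORing each lane with 1 << 15 maps the unsigned ordering onto the signed ordering, so a signed greater-than produces the unsigned answer.
let (a, b): (u16, u16) = (0x9000, 0x1000);
let (sa, sb) = ((a ^ 0x8000) as i16, (b ^ 0x8000) as i16);
// The sign-flipped values compare the same way as the originals do unsigned.
assert_eq!(a > b, sa > sb);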
fn shift_left<const BITS: usize>(&self) -> U16x8
fn shift_right<const BITS: usize>(&self) -> U16x8
fn unpack_lo(&self, other: U16x8) -> U16x8
fn unpack_hi(&self, other: U16x8) -> U16x8
fn max(&self, other: U16x8) -> U16x8
§Scalar Equivalent:
U16x8::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
])
§Avx2
- PMAXUW xmm, xmm
fn min(&self, other: U16x8) -> U16x8
§Scalar Equivalent:
U16x8::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
])
§Avx2
- PMINUW xmm, xmm
type BroadcastLoInput = U16x8
A vector of [Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())]
impl SimdBase16 for U16x8
fn shuffle_lo<const I3: usize, const I2: usize, const I1: usize, const I0: usize>(&self) -> U16x8
§Scalar Equivalent:
U16x8::from([
// 128-bit Lane #0
self.as_array()[I0 + 0 * 8],
self.as_array()[I1 + 0 * 8],
self.as_array()[I2 + 0 * 8],
self.as_array()[I3 + 0 * 8],
self.as_array()[4 + 0 * 8],
self.as_array()[5 + 0 * 8],
self.as_array()[6 + 0 * 8],
self.as_array()[7 + 0 * 8],
])
§Avx2
- PSHUFLW xmm, xmm, imm8
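A short sketch (not from the original docs), following the scalar equivalent above: the generic parameters are given in the order <I3, I2, I1, I0>, so output lane 0 takes input lane I0 (the SimdBase16 trait is assumed to be in scope):
let v = U16x8::from_array([10, 11, 12, 13, 14, 15, 16, 17]);
// Reverse the low four lanes; the high four lanes pass through untouched.
let shuffled = v.shuffle_lo::<0, 1, 2, 3>();
assert_eq!(shuffled.as_array()[0], 13);
assert_eq!(shuffled.as_array()[3], 10);
assert_eq!(shuffled.as_array()[4], 14);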
fn shuffle_hi<const I3: usize, const I2: usize, const I1: usize, const I0: usize>(&self) -> U16x8
§Scalar Equivalent:
U16x8::from([
// 128-bit Lane #0
self.as_array()[0 + 0 * 8],
self.as_array()[1 + 0 * 8],
self.as_array()[2 + 0 * 8],
self.as_array()[3 + 0 * 8],
self.as_array()[I0 + 4 + 0 * 8],
self.as_array()[I1 + 4 + 0 * 8],
self.as_array()[I2 + 4 + 0 * 8],
self.as_array()[I3 + 4 + 0 * 8],
])
§Avx2
- PSHUFHW xmm, xmm, imm8
impl SimdBase8x for U16x8
fn blend<const B7: bool, const B6: bool, const B5: bool, const B4: bool, const B3: bool, const B2: bool, const B1: bool, const B0: bool>(&self, if_true: U16x8) -> U16x8
§Scalar Equivalent:
U16x8::from([
(if B0 { if_true } else { *self }).as_array()[0],
(if B1 { if_true } else { *self }).as_array()[1],
(if B2 { if_true } else { *self }).as_array()[2],
(if B3 { if_true } else { *self }).as_array()[3],
(if B4 { if_true } else { *self }).as_array()[4],
(if B5 { if_true } else { *self }).as_array()[5],
(if B6 { if_true } else { *self }).as_array()[6],
(if B7 { if_true } else { *self }).as_array()[7],
])
§Avx2
- PBLENDW xmm, xmm, imm8
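For illustration (a sketch, not from the original docs, with the SimdBase8x trait in scope), based on the scalar equivalent above, where Bn selects whether lane n comes from if_true:
let base = U16x8::from_array([0; 8]);
let ones = U16x8::from_array([1; 8]);
// Take lanes 0 and 7 from `ones`, everything else from `base`.
let mixed = base.blend::<true, false, false, false, false, false, false, true>(ones);
assert_eq!(mixed.as_array()[0], 1);
assert_eq!(mixed.as_array()[1], 0);
assert_eq!(mixed.as_array()[7], 1);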
impl SimdSaturatingArithmetic for U16x8
fn saturating_add(&self, other: U16x8) -> U16x8
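A small sketch (not from the original docs), assuming saturating_add mirrors u16::saturating_add lane-wise, clamping at u16::MAX instead of wrapping, and that the SimdSaturatingArithmetic trait is in scope:
let a = U16x8::from_array([u16::MAX, 1, 2, 3, 4, 5, 6, 7]);
let b = U16x8::from_array([10; 8]);
assert_eq!(a.saturating_add(b).as_array()[0], u16::MAX); // clamped, not wrapped
assert_eq!(a.saturating_add(b).as_array()[1], 11);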
impl Sub for U16x8
fn sub(self, rhs: U16x8) -> U16x8
Perform a pairwise wrapping_sub
§Scalar Equivalent
U16x8::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
])
§AVX2 Intrinsics Used
- _mm_sub_epi16: PSUBW xmm, xmm
§Neon Intrinsics Used
- vsubq_u16
impl SubAssign for U16x8
fn sub_assign(&mut self, other: U16x8)
Performs the -= operation.
impl Copy for U16x8
impl Eq for U16x8
impl Pod for U16x8
Auto Trait Implementations§
impl Freeze for U16x8
impl RefUnwindSafe for U16x8
impl Send for U16x8
impl Sync for U16x8
impl Unpin for U16x8
impl UnwindSafe for U16x8
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits as &Self.