Struct vectoreyes::U16x8
#[repr(transparent)]
pub struct U16x8(_);
[u16; 8] as a vector.
Implementations
impl U16x8
pub const fn from_array(array: [u16; 8]) -> U16x8
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
Example
const MY_EXTREMELY_FUN_VALUE: U16x8 =
U16x8::from_array([0, 1, 2, 3, 4, 5, 6, 7]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as u16, value);
}
Avx2
Trait Implementations
impl Add<U16x8> for U16x8
fn add(self, rhs: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
])
Avx2
- PADDW xmm, xmm
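For example, lane arithmetic wraps on overflow rather than panicking; a quick sketch (assuming the From<[u16; 8]> impl used throughout these scalar equivalents):
let a = U16x8::from([u16::MAX, 1, 2, 3, 4, 5, 6, 7]);
let b = U16x8::from([1u16; 8]);
// Lane 0 wraps from u16::MAX back around to 0.
assert_eq!((a + b).as_array(), [0, 2, 3, 4, 5, 6, 7, 8]);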
impl AddAssign<U16x8> for U16x8
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<U16x8> for U16x8
fn bitand(self, rhs: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
])
Avx2
- PAND xmm, xmm
impl BitAndAssign<U16x8> for U16x8
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<U16x8> for U16x8
fn bitor(self, rhs: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
])
Avx2
- POR xmm, xmm
impl BitOrAssign<U16x8> for U16x8
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<U16x8> for U16x8
fn bitxor(self, rhs: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
])
Avx2
- PXOR xmm, xmm
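All three bitwise operators behave exactly like their scalar counterparts in every lane; a small illustrative sketch (same assumptions as the addition example above):
let x = U16x8::from([0b1100u16; 8]);
let y = U16x8::from([0b1010u16; 8]);
assert_eq!((x & y).as_array(), [0b1000u16; 8]);
assert_eq!((x | y).as_array(), [0b1110u16; 8]);
assert_eq!((x ^ y).as_array(), [0b0110u16; 8]);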
impl BitXorAssign<U16x8> for U16x8
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for U16x8
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
impl ConstantTimeEq for U16x8
impl ExtendingCast<U16x8> for U32x4
fn extending_cast_from(vector: U16x8) -> U32x4
Scalar Equivalent:
U32x4::from([
u32::from(vector.as_array()[0]),
u32::from(vector.as_array()[1]),
u32::from(vector.as_array()[2]),
u32::from(vector.as_array()[3]),
])
Avx2
- PMOVZXWD xmm, xmm
impl ExtendingCast<U16x8> for U32x8
fn extending_cast_from(vector: U16x8) -> U32x8
Scalar Equivalent:
U32x8::from([
u32::from(vector.as_array()[0]),
u32::from(vector.as_array()[1]),
u32::from(vector.as_array()[2]),
u32::from(vector.as_array()[3]),
u32::from(vector.as_array()[4]),
u32::from(vector.as_array()[5]),
u32::from(vector.as_array()[6]),
u32::from(vector.as_array()[7]),
])
Avx2
- VPMOVZXWD ymm, xmm
impl ExtendingCast<U16x8> for U64x2
fn extending_cast_from(vector: U16x8) -> U64x2
Scalar Equivalent:
U64x2::from([
u64::from(vector.as_array()[0]),
u64::from(vector.as_array()[1]),
])
Avx2
- PMOVZXWQ xmm, xmm
impl ExtendingCast<U16x8> for U64x4
fn extending_cast_from(vector: U16x8) -> U64x4
Scalar Equivalent:
U64x4::from([
u64::from(vector.as_array()[0]),
u64::from(vector.as_array()[1]),
u64::from(vector.as_array()[2]),
u64::from(vector.as_array()[3]),
])
Avx2
- VPMOVZXWQ ymm, xmm
impl ExtendingCast<U8x16> for U16x8
fn extending_cast_from(vector: U8x16) -> U16x8
Scalar Equivalent:
U16x8::from([
u16::from(vector.as_array()[0]),
u16::from(vector.as_array()[1]),
u16::from(vector.as_array()[2]),
u16::from(vector.as_array()[3]),
u16::from(vector.as_array()[4]),
u16::from(vector.as_array()[5]),
u16::from(vector.as_array()[6]),
u16::from(vector.as_array()[7]),
])
Avx2
- PMOVZXBW xmm, xmm
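Each extending cast zero-extends as many low lanes as fit in the destination; a sketch (assuming U32x4 is imported alongside U16x8):
let v = U16x8::from([0, 1, 2, 3, 4, 5, 6, 7]);
// Only the low four u16 lanes fit into a U32x4.
let wide = U32x4::extending_cast_from(v);
assert_eq!(wide.as_array(), [0u32, 1, 2, 3]);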
impl From<U16x8> for U16x16
fn from(vector: U16x8) -> U16x16
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 16];
out[0..8].copy_from_slice(&vector.as_array());
U16x16::from(out)
Avx2
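A sketch of the zeroing behavior described in the note above (assuming U16x16 exposes the same as_array accessor as U16x8):
let narrow = U16x8::from([1, 2, 3, 4, 5, 6, 7, 8]);
let wide = U16x16::from(narrow);
// The low eight lanes are copied; the upper eight are zeroed.
assert_eq!(wide.as_array(), [1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]);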
impl From<U16x8> for U32x8
fn from(vector: U16x8) -> U32x8
Scalar Equivalent:
U32x8::from([
u32::from(vector.as_array()[0]),
u32::from(vector.as_array()[1]),
u32::from(vector.as_array()[2]),
u32::from(vector.as_array()[3]),
u32::from(vector.as_array()[4]),
u32::from(vector.as_array()[5]),
u32::from(vector.as_array()[6]),
u32::from(vector.as_array()[7]),
])
Avx2
- VPMOVZXWD ymm, xmm
impl Shl<U16x8> for U16x8
fn shl(self, amount: U16x8) -> U16x8
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 16 {
0
} else {
*x << amm
};
}
U16x8::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
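The per-lane shift clamps oversized counts to zero rather than hitting the scalar shift's overflow behavior; a sketch under the same assumptions as the from_array example:
let v = U16x8::from([1u16; 8]);
let amounts = U16x8::from([0, 1, 2, 3, 15, 16, 17, 7]);
// Counts of 16 or more zero the lane instead of being undefined.
assert_eq!((v << amounts).as_array(), [1, 2, 4, 8, 1 << 15, 0, 0, 1 << 7]);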
impl Shl<u64> for U16x8
fn shl(self, amount: u64) -> U16x8
Scalar Equivalent:
if amount >= 16 {
U16x8::ZERO
} else {
U16x8::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
])
}
Avx2
- Instruction sequence.
- PSLLW xmm, xmm
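A sketch of the uniform shift, including the all-lanes-zero case:
let v = U16x8::from([1, 2, 3, 4, 5, 6, 7, 8]);
assert_eq!((v << 1u64).as_array(), [2, 4, 6, 8, 10, 12, 14, 16]);
// A count of 16 or more clears every lane.
assert_eq!((v << 16u64).as_array(), [0u16; 8]);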
impl ShlAssign<U16x8> for U16x8
fn shl_assign(&mut self, amount: U16x8)
Performs the <<= operation.
impl ShlAssign<u64> for U16x8
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<U16x8> for U16x8
fn shr(self, amount: U16x8) -> U16x8
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 16 {
0
} else {
*x >> amm
};
}
U16x8::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl Shr<u64> for U16x8
fn shr(self, amount: u64) -> U16x8
Scalar Equivalent:
if amount >= 16 {
U16x8::ZERO
} else {
U16x8::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
])
}
Avx2
- Instruction sequence.
- PSRLW xmm, xmm
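And the mirror-image sketch for the uniform right shift:
let v = U16x8::from([0x8000u16; 8]);
assert_eq!((v >> 15u64).as_array(), [1u16; 8]);
// As with Shl, a count of 16 or more yields zero.
assert_eq!((v >> 16u64).as_array(), [0u16; 8]);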
impl ShrAssign<U16x8> for U16x8
fn shr_assign(&mut self, amount: U16x8)
Performs the >>= operation.
impl ShrAssign<u64> for U16x8
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for U16x8
fn set_lo(scalar: u16) -> U16x8
Scalar Equivalent:
let mut out = [0; 8];
out[0] = scalar;
U16x8::from(out)
Avx2
- Instruction sequence.
fn broadcast_lo(vector: U16x8) -> U16x8
fn cmp_eq(&self, other: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
if self.as_array()[0] == other.as_array()[0] { u16::MAX } else { 0 },
if self.as_array()[1] == other.as_array()[1] { u16::MAX } else { 0 },
if self.as_array()[2] == other.as_array()[2] { u16::MAX } else { 0 },
if self.as_array()[3] == other.as_array()[3] { u16::MAX } else { 0 },
if self.as_array()[4] == other.as_array()[4] { u16::MAX } else { 0 },
if self.as_array()[5] == other.as_array()[5] { u16::MAX } else { 0 },
if self.as_array()[6] == other.as_array()[6] { u16::MAX } else { 0 },
if self.as_array()[7] == other.as_array()[7] { u16::MAX } else { 0 },
])
Avx2
- PCMPEQW xmm, xmm
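The comparison returns a lane mask rather than a bool: equal lanes become all-ones, unequal lanes become zero. A sketch:
let a = U16x8::from([1, 2, 3, 4, 5, 6, 7, 8]);
let b = U16x8::from([1, 0, 3, 0, 5, 0, 7, 0]);
assert_eq!(
    a.cmp_eq(b).as_array(),
    [u16::MAX, 0, u16::MAX, 0, u16::MAX, 0, u16::MAX, 0],
);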
fn and_not(&self, other: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
])
Avx2
- PANDN xmm, xmm
fn cmp_gt(&self, other: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
if self.as_array()[0] > other.as_array()[0] { u16::MAX } else { 0 },
if self.as_array()[1] > other.as_array()[1] { u16::MAX } else { 0 },
if self.as_array()[2] > other.as_array()[2] { u16::MAX } else { 0 },
if self.as_array()[3] > other.as_array()[3] { u16::MAX } else { 0 },
if self.as_array()[4] > other.as_array()[4] { u16::MAX } else { 0 },
if self.as_array()[5] > other.as_array()[5] { u16::MAX } else { 0 },
if self.as_array()[6] > other.as_array()[6] { u16::MAX } else { 0 },
if self.as_array()[7] > other.as_array()[7] { u16::MAX } else { 0 },
])
Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 15);
Self::from(I16x8::from(*self ^ sign_bit).cmp_gt(
I16x8::from(other ^ sign_bit)
))
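Flipping the sign bit of both operands lets the signed compare stand in for an unsigned one; a sketch of the resulting semantics:
let a = U16x8::from([0, 1, 0x8000, u16::MAX, 2, 3, 4, 5]);
let b = U16x8::from([0, 0, 0x7FFF, 0, 2, 2, 5, 5]);
// Unsigned ordering: 0x8000 > 0x7FFF and u16::MAX > 0.
assert_eq!(
    a.cmp_gt(b).as_array(),
    [0, u16::MAX, u16::MAX, u16::MAX, 0, u16::MAX, 0, 0],
);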
fn shift_left<const BITS: usize>(&self) -> U16x8
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
U16x8::from(out)
Avx2
- PSLLW xmm, imm8
fn shift_right<const BITS: usize>(&self) -> U16x8
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
U16x8::from(out)
Avx2
- PSRLW xmm, imm8
fn unpack_lo(&self, other: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
])
Avx2
- PUNPCKLWD xmm, xmm
fn unpack_hi(&self, other: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
// Lane# 0
self.as_array()[4],
other.as_array()[4],
self.as_array()[5],
other.as_array()[5],
self.as_array()[6],
other.as_array()[6],
self.as_array()[7],
other.as_array()[7],
])
Avx2
- PUNPCKHWD xmm, xmm
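The two unpacks interleave complementary halves, so together they cover all sixteen input lanes; a sketch:
let a = U16x8::from([0, 1, 2, 3, 4, 5, 6, 7]);
let b = U16x8::from([10, 11, 12, 13, 14, 15, 16, 17]);
assert_eq!(a.unpack_lo(b).as_array(), [0, 10, 1, 11, 2, 12, 3, 13]);
assert_eq!(a.unpack_hi(b).as_array(), [4, 14, 5, 15, 6, 16, 7, 17]);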
fn max(&self, other: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
])
Avx2
- PMAXUW xmm, xmm
fn min(&self, other: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
])
Avx2
- PMINUW xmm, xmm
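Both min and max are elementwise and unsigned (PMINUW/PMAXUW); a sketch:
let a = U16x8::from([0, 9, 2, 9, 4, 9, 6, 9]);
let b = U16x8::from([9, 1, 9, 3, 9, 5, 9, 7]);
assert_eq!(a.max(b).as_array(), [9u16; 8]);
assert_eq!(a.min(b).as_array(), [0, 1, 2, 3, 4, 5, 6, 7]);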
const ZERO: Self = _
type BroadcastLoInput = U16x8
impl SimdBase8x for U16x8
fn blend<const B7: bool, const B6: bool, const B5: bool, const B4: bool, const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: U16x8
) -> U16x8
Scalar Equivalent:
U16x8::from([
(if B0 { if_true } else { *self }).as_array()[0],
(if B1 { if_true } else { *self }).as_array()[1],
(if B2 { if_true } else { *self }).as_array()[2],
(if B3 { if_true } else { *self }).as_array()[3],
(if B4 { if_true } else { *self }).as_array()[4],
(if B5 { if_true } else { *self }).as_array()[5],
(if B6 { if_true } else { *self }).as_array()[6],
(if B7 { if_true } else { *self }).as_array()[7],
])
Avx2
- PBLENDW xmm, xmm, imm8
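Note that the const parameters are declared from B7 down to B0, so a turbofish lists the high lane's flag first even though B0 still selects lane 0. A sketch:
let base = U16x8::from([0u16; 8]);
let ones = U16x8::from([1u16; 8]);
// B3..B0 = true picks if_true for lanes 0-3; lanes 4-7 keep base.
let mixed = base.blend::<false, false, false, false, true, true, true, true>(ones);
assert_eq!(mixed.as_array(), [1, 1, 1, 1, 0, 0, 0, 0]);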
impl Sub<U16x8> for U16x8
fn sub(self, rhs: U16x8) -> U16x8
Scalar Equivalent:
U16x8::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
])
Avx2
- PSUBW xmm, xmm
impl SubAssign<U16x8> for U16x8
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for U16x8
impl Eq for U16x8
impl Pod for U16x8
Auto Trait Implementations
impl RefUnwindSafe for U16x8
impl Send for U16x8
impl Sync for U16x8
impl Unpin for U16x8
impl UnwindSafe for U16x8
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T where T: AnyBitPattern
type Bits = T
Self must have the same layout as the specified Bits, except for the possible invalid bit patterns being checked during is_valid_bit_pattern.