Struct vectoreyes::U16x16 
source · [−]#[repr(transparent)]pub struct U16x16(_);Expand description
[u16; 16] as a vector.
Implementations
sourceimpl U16x16
 
impl U16x16
sourcepub const fn from_array(array: [u16; 16]) -> U16x16
 
pub const fn from_array(array: [u16; 16]) -> U16x16
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
Example
const MY_EXTREMELY_FUN_VALUE: U16x16 =
    U16x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
    assert_eq!(i as u16, value);
}Avx2
Trait Implementations
sourceimpl Add<U16x16> for U16x16
 
impl Add<U16x16> for U16x16
sourcefn add(self, rhs: U16x16) -> U16x16
 
fn add(self, rhs: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    self.as_array()[0].wrapping_add(rhs.as_array()[0]),
    self.as_array()[1].wrapping_add(rhs.as_array()[1]),
    self.as_array()[2].wrapping_add(rhs.as_array()[2]),
    self.as_array()[3].wrapping_add(rhs.as_array()[3]),
    self.as_array()[4].wrapping_add(rhs.as_array()[4]),
    self.as_array()[5].wrapping_add(rhs.as_array()[5]),
    self.as_array()[6].wrapping_add(rhs.as_array()[6]),
    self.as_array()[7].wrapping_add(rhs.as_array()[7]),
    self.as_array()[8].wrapping_add(rhs.as_array()[8]),
    self.as_array()[9].wrapping_add(rhs.as_array()[9]),
    self.as_array()[10].wrapping_add(rhs.as_array()[10]),
    self.as_array()[11].wrapping_add(rhs.as_array()[11]),
    self.as_array()[12].wrapping_add(rhs.as_array()[12]),
    self.as_array()[13].wrapping_add(rhs.as_array()[13]),
    self.as_array()[14].wrapping_add(rhs.as_array()[14]),
    self.as_array()[15].wrapping_add(rhs.as_array()[15]),
])Avx2
- 
VPADDW ymm, ymm, ymm
 
sourceimpl AddAssign<U16x16> for U16x16
 
impl AddAssign<U16x16> for U16x16
sourcefn add_assign(&mut self, rhs: Self)
 
fn add_assign(&mut self, rhs: Self)
Performs the 
+= operation. Read moresourceimpl BitAnd<U16x16> for U16x16
 
impl BitAnd<U16x16> for U16x16
sourcefn bitand(self, rhs: U16x16) -> U16x16
 
fn bitand(self, rhs: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    self.as_array()[0] & rhs.as_array()[0],
    self.as_array()[1] & rhs.as_array()[1],
    self.as_array()[2] & rhs.as_array()[2],
    self.as_array()[3] & rhs.as_array()[3],
    self.as_array()[4] & rhs.as_array()[4],
    self.as_array()[5] & rhs.as_array()[5],
    self.as_array()[6] & rhs.as_array()[6],
    self.as_array()[7] & rhs.as_array()[7],
    self.as_array()[8] & rhs.as_array()[8],
    self.as_array()[9] & rhs.as_array()[9],
    self.as_array()[10] & rhs.as_array()[10],
    self.as_array()[11] & rhs.as_array()[11],
    self.as_array()[12] & rhs.as_array()[12],
    self.as_array()[13] & rhs.as_array()[13],
    self.as_array()[14] & rhs.as_array()[14],
    self.as_array()[15] & rhs.as_array()[15],
])Avx2
- 
VPAND ymm, ymm, ymm
 
sourceimpl BitAndAssign<U16x16> for U16x16
 
impl BitAndAssign<U16x16> for U16x16
sourcefn bitand_assign(&mut self, rhs: Self)
 
fn bitand_assign(&mut self, rhs: Self)
Performs the 
&= operation. Read moresourceimpl BitOr<U16x16> for U16x16
 
impl BitOr<U16x16> for U16x16
sourcefn bitor(self, rhs: U16x16) -> U16x16
 
fn bitor(self, rhs: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    self.as_array()[0] | rhs.as_array()[0],
    self.as_array()[1] | rhs.as_array()[1],
    self.as_array()[2] | rhs.as_array()[2],
    self.as_array()[3] | rhs.as_array()[3],
    self.as_array()[4] | rhs.as_array()[4],
    self.as_array()[5] | rhs.as_array()[5],
    self.as_array()[6] | rhs.as_array()[6],
    self.as_array()[7] | rhs.as_array()[7],
    self.as_array()[8] | rhs.as_array()[8],
    self.as_array()[9] | rhs.as_array()[9],
    self.as_array()[10] | rhs.as_array()[10],
    self.as_array()[11] | rhs.as_array()[11],
    self.as_array()[12] | rhs.as_array()[12],
    self.as_array()[13] | rhs.as_array()[13],
    self.as_array()[14] | rhs.as_array()[14],
    self.as_array()[15] | rhs.as_array()[15],
])Avx2
- 
VPOR ymm, ymm, ymm
 
sourceimpl BitOrAssign<U16x16> for U16x16
 
impl BitOrAssign<U16x16> for U16x16
sourcefn bitor_assign(&mut self, rhs: Self)
 
fn bitor_assign(&mut self, rhs: Self)
Performs the 
|= operation. Read moresourceimpl BitXor<U16x16> for U16x16
 
impl BitXor<U16x16> for U16x16
sourcefn bitxor(self, rhs: U16x16) -> U16x16
 
fn bitxor(self, rhs: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    self.as_array()[0] ^ rhs.as_array()[0],
    self.as_array()[1] ^ rhs.as_array()[1],
    self.as_array()[2] ^ rhs.as_array()[2],
    self.as_array()[3] ^ rhs.as_array()[3],
    self.as_array()[4] ^ rhs.as_array()[4],
    self.as_array()[5] ^ rhs.as_array()[5],
    self.as_array()[6] ^ rhs.as_array()[6],
    self.as_array()[7] ^ rhs.as_array()[7],
    self.as_array()[8] ^ rhs.as_array()[8],
    self.as_array()[9] ^ rhs.as_array()[9],
    self.as_array()[10] ^ rhs.as_array()[10],
    self.as_array()[11] ^ rhs.as_array()[11],
    self.as_array()[12] ^ rhs.as_array()[12],
    self.as_array()[13] ^ rhs.as_array()[13],
    self.as_array()[14] ^ rhs.as_array()[14],
    self.as_array()[15] ^ rhs.as_array()[15],
])Avx2
- 
VPXOR ymm, ymm, ymm
 
sourceimpl BitXorAssign<U16x16> for U16x16
 
impl BitXorAssign<U16x16> for U16x16
sourcefn bitxor_assign(&mut self, rhs: Self)
 
fn bitxor_assign(&mut self, rhs: Self)
Performs the 
^= operation. Read moresourceimpl ConditionallySelectable for U16x16
 
impl ConditionallySelectable for U16x16
sourcefn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
 
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
sourcefn conditional_assign(&mut self, other: &Self, choice: Choice)
 
fn conditional_assign(&mut self, other: &Self, choice: Choice)
sourceimpl ConstantTimeEq for U16x16
 
impl ConstantTimeEq for U16x16
sourceimpl ExtendingCast<U8x16> for U16x16
 
impl ExtendingCast<U8x16> for U16x16
sourcefn extending_cast_from(vector: U8x16) -> U16x16
 
fn extending_cast_from(vector: U8x16) -> U16x16
Scalar Equivalent:
U16x16::from([
        u16::from(vector.as_array()[0]),
        u16::from(vector.as_array()[1]),
        u16::from(vector.as_array()[2]),
        u16::from(vector.as_array()[3]),
        u16::from(vector.as_array()[4]),
        u16::from(vector.as_array()[5]),
        u16::from(vector.as_array()[6]),
        u16::from(vector.as_array()[7]),
        u16::from(vector.as_array()[8]),
        u16::from(vector.as_array()[9]),
        u16::from(vector.as_array()[10]),
        u16::from(vector.as_array()[11]),
        u16::from(vector.as_array()[12]),
        u16::from(vector.as_array()[13]),
        u16::from(vector.as_array()[14]),
        u16::from(vector.as_array()[15]),
])Avx2
- 
VPMOVZXBW ymm, xmm
 
sourceimpl From<[U16x8; 2]> for U16x16
 
impl From<[U16x8; 2]> for U16x16
sourceimpl From<U16x16> for [U16x8; 2]
 
impl From<U16x16> for [U16x8; 2]
sourceimpl From<U16x8> for U16x16
 
impl From<U16x8> for U16x16
sourcefn from(vector: U16x8) -> U16x16
 
fn from(vector: U16x8) -> U16x16
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 16];
out[0..8].copy_from_slice(&vector.as_array());
U16x16::from(out)Avx2
sourceimpl From<U8x16> for U16x16
 
impl From<U8x16> for U16x16
sourcefn from(vector: U8x16) -> U16x16
 
fn from(vector: U8x16) -> U16x16
Scalar Equivalent:
U16x16::from([
        u16::from(vector.as_array()[0]),
        u16::from(vector.as_array()[1]),
        u16::from(vector.as_array()[2]),
        u16::from(vector.as_array()[3]),
        u16::from(vector.as_array()[4]),
        u16::from(vector.as_array()[5]),
        u16::from(vector.as_array()[6]),
        u16::from(vector.as_array()[7]),
        u16::from(vector.as_array()[8]),
        u16::from(vector.as_array()[9]),
        u16::from(vector.as_array()[10]),
        u16::from(vector.as_array()[11]),
        u16::from(vector.as_array()[12]),
        u16::from(vector.as_array()[13]),
        u16::from(vector.as_array()[14]),
        u16::from(vector.as_array()[15]),
])Avx2
- 
VPMOVZXBW ymm, xmm
 
sourceimpl Shl<U16x16> for U16x16
 
impl Shl<U16x16> for U16x16
sourcefn shl(self, amount: U16x16) -> U16x16
 
fn shl(self, amount: U16x16) -> U16x16
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
    *x = if amm >= 16  {
        0
    } else {
        *x << amm
    };
}
U16x16::from(out)Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
sourceimpl Shl<u64> for U16x16
 
impl Shl<u64> for U16x16
sourcefn shl(self, amount: u64) -> U16x16
 
fn shl(self, amount: u64) -> U16x16
Scalar Equivalent:
if amount >= 16 {
    U16x16::ZERO
} else {
    U16x16::from([
        self.as_array()[0] << amount,
        self.as_array()[1] << amount,
        self.as_array()[2] << amount,
        self.as_array()[3] << amount,
        self.as_array()[4] << amount,
        self.as_array()[5] << amount,
        self.as_array()[6] << amount,
        self.as_array()[7] << amount,
        self.as_array()[8] << amount,
        self.as_array()[9] << amount,
        self.as_array()[10] << amount,
        self.as_array()[11] << amount,
        self.as_array()[12] << amount,
        self.as_array()[13] << amount,
        self.as_array()[14] << amount,
        self.as_array()[15] << amount,
    ])
}Avx2
- 
VPSLLW ymm, ymm, xmm
 - 
Instruction sequence.
 
sourceimpl ShlAssign<U16x16> for U16x16
 
impl ShlAssign<U16x16> for U16x16
sourcefn shl_assign(&mut self, amount: U16x16)
 
fn shl_assign(&mut self, amount: U16x16)
Performs the 
<<= operation. Read moresourceimpl ShlAssign<u64> for U16x16
 
impl ShlAssign<u64> for U16x16
sourcefn shl_assign(&mut self, amount: u64)
 
fn shl_assign(&mut self, amount: u64)
Performs the 
<<= operation. Read moresourceimpl Shr<U16x16> for U16x16
 
impl Shr<U16x16> for U16x16
sourcefn shr(self, amount: U16x16) -> U16x16
 
fn shr(self, amount: U16x16) -> U16x16
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
    *x = if amm >= 16  {
        0
    } else {
        *x >> amm
    };
}
U16x16::from(out)Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
sourceimpl Shr<u64> for U16x16
 
impl Shr<u64> for U16x16
sourcefn shr(self, amount: u64) -> U16x16
 
fn shr(self, amount: u64) -> U16x16
Scalar Equivalent:
if amount >= 16 {
    U16x16::ZERO
} else {
    U16x16::from([
        self.as_array()[0] >> amount,
        self.as_array()[1] >> amount,
        self.as_array()[2] >> amount,
        self.as_array()[3] >> amount,
        self.as_array()[4] >> amount,
        self.as_array()[5] >> amount,
        self.as_array()[6] >> amount,
        self.as_array()[7] >> amount,
        self.as_array()[8] >> amount,
        self.as_array()[9] >> amount,
        self.as_array()[10] >> amount,
        self.as_array()[11] >> amount,
        self.as_array()[12] >> amount,
        self.as_array()[13] >> amount,
        self.as_array()[14] >> amount,
        self.as_array()[15] >> amount,
    ])
}Avx2
- 
VPSRLW ymm, ymm, xmm
 - 
Instruction sequence.
 
sourceimpl ShrAssign<U16x16> for U16x16
 
impl ShrAssign<U16x16> for U16x16
sourcefn shr_assign(&mut self, amount: U16x16)
 
fn shr_assign(&mut self, amount: U16x16)
Performs the 
>>= operation. Read moresourceimpl ShrAssign<u64> for U16x16
 
impl ShrAssign<u64> for U16x16
sourcefn shr_assign(&mut self, amount: u64)
 
fn shr_assign(&mut self, amount: u64)
Performs the 
>>= operation. Read moresourceimpl SimdBase for U16x16
 
impl SimdBase for U16x16
sourcefn set_lo(scalar: u16) -> U16x16
 
fn set_lo(scalar: u16) -> U16x16
Scalar Equivalent:
let mut out = [0; 16];
out[0] = scalar;
U16x16::from(out)Avx2
- 
Instruction sequence.
 
sourcefn broadcast_lo(vector: U16x8) -> U16x16
 
fn broadcast_lo(vector: U16x8) -> U16x16
sourcefn cmp_eq(&self, other: U16x16) -> U16x16
 
fn cmp_eq(&self, other: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    if self.as_array()[0] == other.as_array()[0] {  u16::MAX  } else { 0 },
    if self.as_array()[1] == other.as_array()[1] {  u16::MAX  } else { 0 },
    if self.as_array()[2] == other.as_array()[2] {  u16::MAX  } else { 0 },
    if self.as_array()[3] == other.as_array()[3] {  u16::MAX  } else { 0 },
    if self.as_array()[4] == other.as_array()[4] {  u16::MAX  } else { 0 },
    if self.as_array()[5] == other.as_array()[5] {  u16::MAX  } else { 0 },
    if self.as_array()[6] == other.as_array()[6] {  u16::MAX  } else { 0 },
    if self.as_array()[7] == other.as_array()[7] {  u16::MAX  } else { 0 },
    if self.as_array()[8] == other.as_array()[8] {  u16::MAX  } else { 0 },
    if self.as_array()[9] == other.as_array()[9] {  u16::MAX  } else { 0 },
    if self.as_array()[10] == other.as_array()[10] {  u16::MAX  } else { 0 },
    if self.as_array()[11] == other.as_array()[11] {  u16::MAX  } else { 0 },
    if self.as_array()[12] == other.as_array()[12] {  u16::MAX  } else { 0 },
    if self.as_array()[13] == other.as_array()[13] {  u16::MAX  } else { 0 },
    if self.as_array()[14] == other.as_array()[14] {  u16::MAX  } else { 0 },
    if self.as_array()[15] == other.as_array()[15] {  u16::MAX  } else { 0 },
])Avx2
- 
VPCMPEQW ymm, ymm, ymm
 
sourcefn and_not(&self, other: U16x16) -> U16x16
 
fn and_not(&self, other: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    self.as_array()[0] & (!other.as_array()[0]),
    self.as_array()[1] & (!other.as_array()[1]),
    self.as_array()[2] & (!other.as_array()[2]),
    self.as_array()[3] & (!other.as_array()[3]),
    self.as_array()[4] & (!other.as_array()[4]),
    self.as_array()[5] & (!other.as_array()[5]),
    self.as_array()[6] & (!other.as_array()[6]),
    self.as_array()[7] & (!other.as_array()[7]),
    self.as_array()[8] & (!other.as_array()[8]),
    self.as_array()[9] & (!other.as_array()[9]),
    self.as_array()[10] & (!other.as_array()[10]),
    self.as_array()[11] & (!other.as_array()[11]),
    self.as_array()[12] & (!other.as_array()[12]),
    self.as_array()[13] & (!other.as_array()[13]),
    self.as_array()[14] & (!other.as_array()[14]),
    self.as_array()[15] & (!other.as_array()[15]),
])Avx2
- 
VPANDN ymm, ymm, ymm
 
sourcefn cmp_gt(&self, other: U16x16) -> U16x16
 
fn cmp_gt(&self, other: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    if self.as_array()[0] > other.as_array()[0] {  u16::MAX  } else { 0 },
    if self.as_array()[1] > other.as_array()[1] {  u16::MAX  } else { 0 },
    if self.as_array()[2] > other.as_array()[2] {  u16::MAX  } else { 0 },
    if self.as_array()[3] > other.as_array()[3] {  u16::MAX  } else { 0 },
    if self.as_array()[4] > other.as_array()[4] {  u16::MAX  } else { 0 },
    if self.as_array()[5] > other.as_array()[5] {  u16::MAX  } else { 0 },
    if self.as_array()[6] > other.as_array()[6] {  u16::MAX  } else { 0 },
    if self.as_array()[7] > other.as_array()[7] {  u16::MAX  } else { 0 },
    if self.as_array()[8] > other.as_array()[8] {  u16::MAX  } else { 0 },
    if self.as_array()[9] > other.as_array()[9] {  u16::MAX  } else { 0 },
    if self.as_array()[10] > other.as_array()[10] {  u16::MAX  } else { 0 },
    if self.as_array()[11] > other.as_array()[11] {  u16::MAX  } else { 0 },
    if self.as_array()[12] > other.as_array()[12] {  u16::MAX  } else { 0 },
    if self.as_array()[13] > other.as_array()[13] {  u16::MAX  } else { 0 },
    if self.as_array()[14] > other.as_array()[14] {  u16::MAX  } else { 0 },
    if self.as_array()[15] > other.as_array()[15] {  u16::MAX  } else { 0 },
])Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
ⓘ
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 15);
Self::from(I16x16::from(*self ^ sign_bit).cmp_gt(
    I16x16::from(other ^ sign_bit)
))sourcefn shift_left<const BITS: usize>(&self) -> U16x16
 
fn shift_left<const BITS: usize>(&self) -> U16x16
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
    *x <<= BITS;
}
U16x16::from(out)Avx2
- 
VPSLLW ymm, ymm, imm8
 
sourcefn shift_right<const BITS: usize>(&self) -> U16x16
 
fn shift_right<const BITS: usize>(&self) -> U16x16
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
    *x >>= BITS;
}
U16x16::from(out)Avx2
- 
VPSRLW ymm, ymm, imm8
 
sourcefn unpack_lo(&self, other: U16x16) -> U16x16
 
fn unpack_lo(&self, other: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    // Lane# 0
    self.as_array()[0],
    other.as_array()[0],
    self.as_array()[1],
    other.as_array()[1],
    self.as_array()[2],
    other.as_array()[2],
    self.as_array()[3],
    other.as_array()[3],
    // Lane# 1
    self.as_array()[8],
    other.as_array()[8],
    self.as_array()[9],
    other.as_array()[9],
    self.as_array()[10],
    other.as_array()[10],
    self.as_array()[11],
    other.as_array()[11],
])Avx2
- 
VPUNPCKLWD ymm, ymm, ymm
 
sourcefn unpack_hi(&self, other: U16x16) -> U16x16
 
fn unpack_hi(&self, other: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    // Lane# 0
    self.as_array()[4],
    other.as_array()[4],
    self.as_array()[5],
    other.as_array()[5],
    self.as_array()[6],
    other.as_array()[6],
    self.as_array()[7],
    other.as_array()[7],
    // Lane# 1
    self.as_array()[12],
    other.as_array()[12],
    self.as_array()[13],
    other.as_array()[13],
    self.as_array()[14],
    other.as_array()[14],
    self.as_array()[15],
    other.as_array()[15],
])Avx2
- 
VPUNPCKHWD ymm, ymm, ymm
 
sourcefn max(&self, other: U16x16) -> U16x16
 
fn max(&self, other: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    self.as_array()[0].max(other.as_array()[0]),
    self.as_array()[1].max(other.as_array()[1]),
    self.as_array()[2].max(other.as_array()[2]),
    self.as_array()[3].max(other.as_array()[3]),
    self.as_array()[4].max(other.as_array()[4]),
    self.as_array()[5].max(other.as_array()[5]),
    self.as_array()[6].max(other.as_array()[6]),
    self.as_array()[7].max(other.as_array()[7]),
    self.as_array()[8].max(other.as_array()[8]),
    self.as_array()[9].max(other.as_array()[9]),
    self.as_array()[10].max(other.as_array()[10]),
    self.as_array()[11].max(other.as_array()[11]),
    self.as_array()[12].max(other.as_array()[12]),
    self.as_array()[13].max(other.as_array()[13]),
    self.as_array()[14].max(other.as_array()[14]),
    self.as_array()[15].max(other.as_array()[15]),
])Avx2
- 
VPMAXUW ymm, ymm, ymm
 
sourcefn min(&self, other: U16x16) -> U16x16
 
fn min(&self, other: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    self.as_array()[0].min(other.as_array()[0]),
    self.as_array()[1].min(other.as_array()[1]),
    self.as_array()[2].min(other.as_array()[2]),
    self.as_array()[3].min(other.as_array()[3]),
    self.as_array()[4].min(other.as_array()[4]),
    self.as_array()[5].min(other.as_array()[5]),
    self.as_array()[6].min(other.as_array()[6]),
    self.as_array()[7].min(other.as_array()[7]),
    self.as_array()[8].min(other.as_array()[8]),
    self.as_array()[9].min(other.as_array()[9]),
    self.as_array()[10].min(other.as_array()[10]),
    self.as_array()[11].min(other.as_array()[11]),
    self.as_array()[12].min(other.as_array()[12]),
    self.as_array()[13].min(other.as_array()[13]),
    self.as_array()[14].min(other.as_array()[14]),
    self.as_array()[15].min(other.as_array()[15]),
])Avx2
- 
VPMINUW ymm, ymm, ymm
 
const ZERO: Self = _
type BroadcastLoInput = U16x8
sourceimpl Sub<U16x16> for U16x16
 
impl Sub<U16x16> for U16x16
sourcefn sub(self, rhs: U16x16) -> U16x16
 
fn sub(self, rhs: U16x16) -> U16x16
Scalar Equivalent:
U16x16::from([
    self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
    self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
    self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
    self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
    self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
    self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
    self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
    self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
    self.as_array()[8].wrapping_sub(rhs.as_array()[8]),
    self.as_array()[9].wrapping_sub(rhs.as_array()[9]),
    self.as_array()[10].wrapping_sub(rhs.as_array()[10]),
    self.as_array()[11].wrapping_sub(rhs.as_array()[11]),
    self.as_array()[12].wrapping_sub(rhs.as_array()[12]),
    self.as_array()[13].wrapping_sub(rhs.as_array()[13]),
    self.as_array()[14].wrapping_sub(rhs.as_array()[14]),
    self.as_array()[15].wrapping_sub(rhs.as_array()[15]),
])Avx2
- 
VPSUBW ymm, ymm, ymm
 
sourceimpl SubAssign<U16x16> for U16x16
 
impl SubAssign<U16x16> for U16x16
sourcefn sub_assign(&mut self, rhs: Self)
 
fn sub_assign(&mut self, rhs: Self)
Performs the 
-= operation. Read moreimpl Copy for U16x16
impl Eq for U16x16
impl Pod for U16x16
Auto Trait Implementations
impl RefUnwindSafe for U16x16
impl Send for U16x16
impl Sync for U16x16
impl Unpin for U16x16
impl UnwindSafe for U16x16
Blanket Implementations
sourceimpl<T> BorrowMut<T> for Twhere
    T: ?Sized,
 
impl<T> BorrowMut<T> for Twhere
    T: ?Sized,
const: unstable · sourcefn borrow_mut(&mut self) -> &mut T
 
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
impl<T> CheckedBitPattern for Twhere
    T: AnyBitPattern,
impl<T> CheckedBitPattern for Twhere
    T: AnyBitPattern,
type Bits = T
type Bits = T
Self must have the same layout as the specified Bits except for
the possible invalid bit patterns being checked during
is_valid_bit_pattern. Read more