Struct vectoreyes::U32x4 
#[repr(transparent)]
pub struct U32x4(_);
[u32; 4] as a vector.
Implementations
Trait Implementations
impl Add<U32x4> for U32x4

fn add(self, rhs: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    self.as_array()[0].wrapping_add(rhs.as_array()[0]),
    self.as_array()[1].wrapping_add(rhs.as_array()[1]),
    self.as_array()[2].wrapping_add(rhs.as_array()[2]),
    self.as_array()[3].wrapping_add(rhs.as_array()[3]),
])

Avx2
- PADDD xmm, xmm
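
Example (an illustrative sketch, not taken from the crate docs; it assumes U32x4 is importable from the crate root and that as_array returns [u32; 4], as the scalar equivalent suggests):

use vectoreyes::U32x4;

let a = U32x4::from([1, 2, 3, u32::MAX]);
let b = U32x4::from([10, 20, 30, 1]);
// Addition is lane-wise and wrapping: the last lane wraps around to 0.
assert_eq!((a + b).as_array(), [11, 22, 33, 0]);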
 
impl AddAssign<U32x4> for U32x4

fn add_assign(&mut self, rhs: Self)
Performs the += operation.

impl BitAnd<U32x4> for U32x4

impl BitAndAssign<U32x4> for U32x4

fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.

impl BitOr<U32x4> for U32x4

impl BitOrAssign<U32x4> for U32x4

fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.

impl BitXor<U32x4> for U32x4

impl BitXorAssign<U32x4> for U32x4

fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.

impl ConditionallySelectable for U32x4

fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self

fn conditional_assign(&mut self, other: &Self, choice: Choice)
impl ConstantTimeEq for U32x4

impl ExtendingCast<U16x8> for U32x4

fn extending_cast_from(vector: U16x8) -> U32x4
Scalar Equivalent:
U32x4::from([
        u32::from(vector.as_array()[0]),
        u32::from(vector.as_array()[1]),
        u32::from(vector.as_array()[2]),
        u32::from(vector.as_array()[3]),
])

Avx2
- PMOVZXWD xmm, xmm
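
Example (an illustrative sketch; it assumes U16x8 also supports construction via From<[u16; 8]>, which is not shown on this page):

use vectoreyes::{ExtendingCast, U16x8, U32x4};

// Only the low four u16 lanes are zero-extended; the upper four are ignored.
let narrow = U16x8::from([1, 2, 3, 4, 5, 6, 7, 8]);
let wide = U32x4::extending_cast_from(narrow);
assert_eq!(wide.as_array(), [1, 2, 3, 4]);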
 
impl ExtendingCast<U32x4> for U64x2

fn extending_cast_from(vector: U32x4) -> U64x2
Scalar Equivalent:
U64x2::from([
        u64::from(vector.as_array()[0]),
        u64::from(vector.as_array()[1]),
])

Avx2
- PMOVZXDQ xmm, xmm
 
impl ExtendingCast<U32x4> for U64x4

fn extending_cast_from(vector: U32x4) -> U64x4
Scalar Equivalent:
U64x4::from([
        u64::from(vector.as_array()[0]),
        u64::from(vector.as_array()[1]),
        u64::from(vector.as_array()[2]),
        u64::from(vector.as_array()[3]),
])

Avx2
- VPMOVZXDQ ymm, xmm
 
impl ExtendingCast<U8x16> for U32x4

fn extending_cast_from(vector: U8x16) -> U32x4
Scalar Equivalent:
U32x4::from([
        u32::from(vector.as_array()[0]),
        u32::from(vector.as_array()[1]),
        u32::from(vector.as_array()[2]),
        u32::from(vector.as_array()[3]),
])

Avx2
- PMOVZXBD xmm, xmm
 
impl From<U32x4> for U32x8

fn from(vector: U32x4) -> U32x8
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 8];
out[0..4].copy_from_slice(&vector.as_array());
U32x8::from(out)

Avx2
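
Example (an illustrative sketch; it assumes U32x8 exposes the same as_array accessor as U32x4):

use vectoreyes::{U32x4, U32x8};

let lo = U32x4::from([1, 2, 3, 4]);
// Per the note above, the upper four lanes of the destination are zeroed.
let widened = U32x8::from(lo);
assert_eq!(widened.as_array(), [1, 2, 3, 4, 0, 0, 0, 0]);
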
impl From<U32x4> for U64x4

impl Shl<U32x4> for U32x4

impl Shl<u64> for U32x4

impl ShlAssign<U32x4> for U32x4

fn shl_assign(&mut self, amount: U32x4)
Performs the <<= operation.

impl ShlAssign<u64> for U32x4

fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.

impl Shr<U32x4> for U32x4

impl Shr<u64> for U32x4

impl ShrAssign<U32x4> for U32x4

fn shr_assign(&mut self, amount: U32x4)
Performs the >>= operation.

impl ShrAssign<u64> for U32x4

fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.

impl SimdBase for U32x4

fn set_lo(scalar: u32) -> U32x4
Scalar Equivalent:
let mut out = [0; 4];
out[0] = scalar;
U32x4::from(out)

Avx2
- Instruction sequence.
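
Example (an illustrative sketch; the SimdBase import is assumed to bring set_lo into scope):

use vectoreyes::{SimdBase, U32x4};

// set_lo places the scalar in lane 0 and zeroes the remaining lanes.
let v = U32x4::set_lo(7);
assert_eq!(v.as_array(), [7, 0, 0, 0]);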
 
fn broadcast_lo(vector: U32x4) -> U32x4

fn cmp_eq(&self, other: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    if self.as_array()[0] == other.as_array()[0] {  u32::MAX  } else { 0 },
    if self.as_array()[1] == other.as_array()[1] {  u32::MAX  } else { 0 },
    if self.as_array()[2] == other.as_array()[2] {  u32::MAX  } else { 0 },
    if self.as_array()[3] == other.as_array()[3] {  u32::MAX  } else { 0 },
])

Avx2
- PCMPEQD xmm, xmm
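
Example (an illustrative sketch under the same import assumptions as above):

use vectoreyes::{SimdBase, U32x4};

let a = U32x4::from([1, 2, 3, 4]);
let b = U32x4::from([1, 9, 3, 9]);
// Equal lanes become all-ones (u32::MAX); unequal lanes become 0.
assert_eq!(a.cmp_eq(b).as_array(), [u32::MAX, 0, u32::MAX, 0]);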
 
fn and_not(&self, other: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    self.as_array()[0] & (!other.as_array()[0]),
    self.as_array()[1] & (!other.as_array()[1]),
    self.as_array()[2] & (!other.as_array()[2]),
    self.as_array()[3] & (!other.as_array()[3]),
])

Avx2
- PANDN xmm, xmm
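
Example (an illustrative sketch; note the operand order, self & !other):

use vectoreyes::{SimdBase, U32x4};

let bits = U32x4::from([0b1111; 4]);
let mask = U32x4::from([0b0101, 0b0011, 0, u32::MAX]);
// Each lane keeps only the bits of self that are clear in other.
assert_eq!(bits.and_not(mask).as_array(), [0b1010, 0b1100, 0b1111, 0]);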
 
fn cmp_gt(&self, other: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    if self.as_array()[0] > other.as_array()[0] {  u32::MAX  } else { 0 },
    if self.as_array()[1] > other.as_array()[1] {  u32::MAX  } else { 0 },
    if self.as_array()[2] > other.as_array()[2] {  u32::MAX  } else { 0 },
    if self.as_array()[3] > other.as_array()[3] {  u32::MAX  } else { 0 },
])

Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 31);
Self::from(I32x4::from(*self ^ sign_bit).cmp_gt(
    I32x4::from(other ^ sign_bit)
))
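
Example (an illustrative sketch of why flipping the sign bit works: XORing both operands with 1 << 31 maps unsigned order onto signed order, so the signed compare yields the unsigned result):

use vectoreyes::{SimdBase, U32x4};

// 0x8000_0000 is larger than 1 as a u32, even though it is negative as an i32.
let a = U32x4::from([0x8000_0000, 5, 0, 7]);
let b = U32x4::from([1, 5, 1, 3]);
assert_eq!(a.cmp_gt(b).as_array(), [u32::MAX, 0, 0, u32::MAX]);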
 
fn shift_left<const BITS: usize>(&self) -> U32x4
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
    *x <<= BITS;
}
U32x4::from(out)

Avx2
- PSLLD xmm, imm8
 
fn shift_right<const BITS: usize>(&self) -> U32x4
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
    *x >>= BITS;
}
U32x4::from(out)

Avx2
- PSRLD xmm, imm8
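
Example (an illustrative sketch; the shift amount is a const generic applied to every lane):

use vectoreyes::{SimdBase, U32x4};

let v = U32x4::from([0b1000, 1, 16, u32::MAX]);
// Logical (zero-filling) shifts, lane-wise.
assert_eq!(v.shift_right::<3>().as_array(), [0b0001, 0, 2, u32::MAX >> 3]);
assert_eq!(v.shift_left::<1>().as_array(), [0b10000, 2, 32, u32::MAX << 1]);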
 
fn unpack_lo(&self, other: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    // Lane# 0
    self.as_array()[0],
    other.as_array()[0],
    self.as_array()[1],
    other.as_array()[1],
])

Avx2
- PUNPCKLDQ xmm, xmm
 
fn unpack_hi(&self, other: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    // Lane# 0
    self.as_array()[2],
    other.as_array()[2],
    self.as_array()[3],
    other.as_array()[3],
])

Avx2
- PUNPCKHDQ xmm, xmm
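
Example (an illustrative sketch showing how the two halves interleave):

use vectoreyes::{SimdBase, U32x4};

let a = U32x4::from([0, 1, 2, 3]);
let b = U32x4::from([10, 11, 12, 13]);
// unpack_lo interleaves the low two lanes of each input, unpack_hi the high two.
assert_eq!(a.unpack_lo(b).as_array(), [0, 10, 1, 11]);
assert_eq!(a.unpack_hi(b).as_array(), [2, 12, 3, 13]);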
 
fn max(&self, other: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    self.as_array()[0].max(other.as_array()[0]),
    self.as_array()[1].max(other.as_array()[1]),
    self.as_array()[2].max(other.as_array()[2]),
    self.as_array()[3].max(other.as_array()[3]),
])

Avx2
- PMAXUD xmm, xmm
 
fn min(&self, other: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    self.as_array()[0].min(other.as_array()[0]),
    self.as_array()[1].min(other.as_array()[1]),
    self.as_array()[2].min(other.as_array()[2]),
    self.as_array()[3].min(other.as_array()[3]),
])

Avx2
- PMINUD xmm, xmm
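
Example (an illustrative sketch; the trait-qualified call form is used only to sidestep any ambiguity with Ord::max/min, should U32x4 also implement Ord, which this page does not confirm):

use vectoreyes::{SimdBase, U32x4};

let a = U32x4::from([1, 200, 3, u32::MAX]);
let b = U32x4::from([100, 2, 30, 0]);
// Lane-wise unsigned maximum and minimum.
assert_eq!(SimdBase::max(&a, b).as_array(), [100, 200, 30, u32::MAX]);
assert_eq!(SimdBase::min(&a, b).as_array(), [1, 2, 3, 0]);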
 
const ZERO: Self = _
type BroadcastLoInput = U32x4
impl SimdBase32 for U32x4

impl SimdBase4x for U32x4

fn blend<const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: U32x4
) -> U32x4
Scalar Equivalent:
U32x4::from([
        (if B0 { if_true } else { *self }).as_array()[0],
        (if B1 { if_true } else { *self }).as_array()[1],
        (if B2 { if_true } else { *self }).as_array()[2],
        (if B3 { if_true } else { *self }).as_array()[3],
])

Avx2
- VPBLENDD xmm, xmm, xmm, imm8
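
Example (an illustrative sketch; the const parameters B3..B0 select, per lane, whether to take if_true):

use vectoreyes::{SimdBase, SimdBase4x, U32x4};

let base = U32x4::from([0, 0, 0, 0]);
let ones = U32x4::from([1, 1, 1, 1]);
// B0 and B2 are true, so lanes 0 and 2 come from if_true.
assert_eq!(
    base.blend::<false, true, false, true>(ones).as_array(),
    [1, 0, 1, 0],
);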
 
impl SimdBaseGatherable<I32x4> for U32x4

Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.

unsafe fn gather(base: *const u32, indices: I32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    base.offset(indices.as_array()[0] as isize).read_unaligned(),
    base.offset(indices.as_array()[1] as isize).read_unaligned(),
    base.offset(indices.as_array()[2] as isize).read_unaligned(),
    base.offset(indices.as_array()[3] as isize).read_unaligned(),
])

Avx2
- VPGATHERDD xmm, vm32x, xmm
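
Example (an illustrative sketch; it assumes I32x4 can be built with From<[i32; 4]>, which is not shown on this page, and uses a fully qualified call to pick the I32x4 index impl):

use vectoreyes::{I32x4, SimdBase, SimdBaseGatherable, U32x4};

let data: [u32; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
let indices = I32x4::from([0, 2, 4, 6]);
// Safety: every index stays inside data, so each unaligned read is valid.
let gathered = unsafe { <U32x4 as SimdBaseGatherable<I32x4>>::gather(data.as_ptr(), indices) };
assert_eq!(gathered.as_array(), [10, 12, 14, 16]);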
 
unsafe fn gather_masked(
    base: *const u32,
    indices: I32x4,
    mask: U32x4,
    src: U32x4
) -> U32x4
Scalar Equivalent:
U32x4::from([
    if (mask.as_array()[0] >> 31) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 31) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if (mask.as_array()[2] >> 31) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if (mask.as_array()[3] >> 31) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])

Avx2
- VPGATHERDD xmm, vm32x, xmm
 
impl SimdBaseGatherable<I64x4> for U32x4

Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.

unsafe fn gather(base: *const u32, indices: I64x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    base.offset(indices.as_array()[0] as isize).read_unaligned(),
    base.offset(indices.as_array()[1] as isize).read_unaligned(),
    base.offset(indices.as_array()[2] as isize).read_unaligned(),
    base.offset(indices.as_array()[3] as isize).read_unaligned(),
])

Avx2
- VPGATHERQD xmm, vm64y, xmm
 
unsafe fn gather_masked(
    base: *const u32,
    indices: I64x4,
    mask: U32x4,
    src: U32x4
) -> U32x4
Scalar Equivalent:
U32x4::from([
    if (mask.as_array()[0] >> 31) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 31) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if (mask.as_array()[2] >> 31) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if (mask.as_array()[3] >> 31) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])

Avx2
- VPGATHERQD xmm, vm64y, xmm
 
impl SimdBaseGatherable<U64x4> for U32x4

Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.

unsafe fn gather(base: *const u32, indices: U64x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    base.offset(indices.as_array()[0] as isize).read_unaligned(),
    base.offset(indices.as_array()[1] as isize).read_unaligned(),
    base.offset(indices.as_array()[2] as isize).read_unaligned(),
    base.offset(indices.as_array()[3] as isize).read_unaligned(),
])

Avx2
- VPGATHERQD xmm, vm64y, xmm
 
unsafe fn gather_masked(
    base: *const u32,
    indices: U64x4,
    mask: U32x4,
    src: U32x4
) -> U32x4
Scalar Equivalent:
U32x4::from([
    if (mask.as_array()[0] >> 31) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 31) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if (mask.as_array()[2] >> 31) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if (mask.as_array()[3] >> 31) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])

Avx2
- VPGATHERQD xmm, vm64y, xmm
 
impl Sub<U32x4> for U32x4

fn sub(self, rhs: U32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
    self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
    self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
    self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
    self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
])

Avx2
- PSUBD xmm, xmm
 
impl SubAssign<U32x4> for U32x4

fn sub_assign(&mut self, rhs: Self)

Performs the -= operation.

impl Copy for U32x4
impl Eq for U32x4
impl Pod for U32x4
Auto Trait Implementations
impl RefUnwindSafe for U32x4
impl Send for U32x4
impl Sync for U32x4
impl Unpin for U32x4
impl UnwindSafe for U32x4
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,

type Bits = T

Self must have the same layout as the specified Bits except for
the possible invalid bit patterns being checked during
is_valid_bit_pattern.