Struct vectoreyes::I32x8
#[repr(transparent)]
pub struct I32x8(_);
[i32; 8] as a vector.
Implementations
impl I32x8
pub const fn from_array(array: [i32; 8]) -> I32x8
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
Example
const MY_EXTREMELY_FUN_VALUE: I32x8 =
    I32x8::from_array([0, 1, 2, 3, 4, 5, 6, 7]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
    assert_eq!(i as i32, value);
}
Avx2
Trait Implementations
impl Add<I32x8> for I32x8
fn add(self, rhs: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
])
Avx2
- VPADDD ymm, ymm, ymm
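For example, a minimal usage sketch of lane-wise wrapping addition, assuming the vectoreyes types are in scope:
let a = I32x8::from([1, 2, 3, 4, 5, 6, 7, i32::MAX]);
let b = I32x8::from([10; 8]);
let sum = a + b;
assert_eq!(sum.as_array()[0], 11);
assert_eq!(sum.as_array()[7], i32::MAX.wrapping_add(10)); // wraps instead of panicking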
impl AddAssign<I32x8> for I32x8
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<I32x8> for I32x8
fn bitand(self, rhs: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
])
Avx2
- VPAND ymm, ymm, ymm
impl BitAndAssign<I32x8> for I32x8
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<I32x8> for I32x8
fn bitor(self, rhs: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
])
Avx2
- VPOR ymm, ymm, ymm
impl BitOrAssign<I32x8> for I32x8
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<I32x8> for I32x8
fn bitxor(self, rhs: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
])
Avx2
- VPXOR ymm, ymm, ymm
impl BitXorAssign<I32x8> for I32x8
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for I32x8
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
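A minimal sketch of constant-time selection, assuming Choice is the subtle crate's Choice type used by this trait:
let a = I32x8::from([0; 8]);
let b = I32x8::from([1; 8]);
// A Choice of 1 selects b; a Choice of 0 selects a.
let picked = I32x8::conditional_select(&a, &b, Choice::from(1u8));
assert_eq!(picked.as_array(), [1; 8]);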
impl ConstantTimeEq for I32x8
impl ExtendingCast<I16x8> for I32x8
fn extending_cast_from(vector: I16x8) -> I32x8
Scalar Equivalent:
I32x8::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
i32::from(vector.as_array()[4]),
i32::from(vector.as_array()[5]),
i32::from(vector.as_array()[6]),
i32::from(vector.as_array()[7]),
])
Avx2
- VPMOVSXWD ymm, xmm
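A sketch of the sign-extending cast, assuming I16x8 offers the same array constructor as I32x8:
let narrow = I16x8::from([-1, 0, 1, 2, 3, 4, 5, 6]);
let wide = I32x8::extending_cast_from(narrow);
// Lanes are sign-extended, not zero-extended.
assert_eq!(wide.as_array()[0], -1);
assert_eq!(wide.as_array()[7], 6);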
impl ExtendingCast<I8x16> for I32x8
fn extending_cast_from(vector: I8x16) -> I32x8
Scalar Equivalent:
I32x8::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
i32::from(vector.as_array()[4]),
i32::from(vector.as_array()[5]),
i32::from(vector.as_array()[6]),
i32::from(vector.as_array()[7]),
])
Avx2
- VPMOVSXBD ymm, xmm
impl From<[I32x4; 2]> for I32x8
impl From<I16x8> for I32x8
fn from(vector: I16x8) -> I32x8
Scalar Equivalent:
I32x8::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
i32::from(vector.as_array()[4]),
i32::from(vector.as_array()[5]),
i32::from(vector.as_array()[6]),
i32::from(vector.as_array()[7]),
])
Avx2
- VPMOVSXWD ymm, xmm
impl From<I32x4> for I32x8
fn from(vector: I32x4) -> I32x8
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 8];
out[0..4].copy_from_slice(&vector.as_array());
I32x8::from(out)
Avx2
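A sketch of the widening conversion, assuming I32x4 offers the same array constructor as I32x8:
let lo = I32x4::from([1, 2, 3, 4]);
let widened = I32x8::from(lo);
// The lower four lanes are copied; the upper four lanes are zeroed.
assert_eq!(widened.as_array(), [1, 2, 3, 4, 0, 0, 0, 0]);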
impl From<I32x8> for [I32x4; 2]
impl Shl<I32x8> for I32x8
impl Shl<u64> for I32x8
fn shl(self, amount: u64) -> I32x8
Scalar Equivalent:
if amount >= 32 {
I32x8::ZERO
} else {
I32x8::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
])
}
Avx2
- VPSLLD ymm, ymm, xmm
- Instruction sequence.
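For example, a minimal sketch of shifting by a scalar amount:
let v = I32x8::from([1; 8]);
assert_eq!((v << 3u64).as_array(), [8; 8]);
// Amounts of 32 or more zero every lane.
assert_eq!((v << 32u64).as_array(), [0; 8]);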
impl ShlAssign<I32x8> for I32x8
fn shl_assign(&mut self, amount: I32x8)
Performs the <<= operation.
impl ShlAssign<u64> for I32x8
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<I32x8> for I32x8
impl Shr<u64> for I32x8
fn shr(self, amount: u64) -> I32x8
Scalar Equivalent:
if amount >= 32 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I32x8::from(out)
} else {
I32x8::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
])
}
Avx2
- VPSRAD ymm, ymm, xmm
- Instruction sequence.
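For example, a minimal sketch of the arithmetic right shift (the sign bit is replicated):
let v = I32x8::from([-8, 8, -1, 1, 0, 2, -2, 4]);
assert_eq!((v >> 2u64).as_array()[0], -2);
assert_eq!((v >> 2u64).as_array()[1], 2);
// Amounts of 32 or more saturate each lane to its sign: -1 for negative lanes, 0 otherwise.
assert_eq!((v >> 40u64).as_array()[0], -1);
assert_eq!((v >> 40u64).as_array()[1], 0);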
impl ShrAssign<I32x8> for I32x8
fn shr_assign(&mut self, amount: I32x8)
Performs the >>= operation.
impl ShrAssign<u64> for I32x8
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for I32x8
fn set_lo(scalar: i32) -> I32x8
Scalar Equivalent:
let mut out = [0; 8];
out[0] = scalar;
I32x8::from(out)
Avx2
- Instruction sequence.
fn broadcast_lo(vector: I32x4) -> I32x8
fn cmp_eq(&self, other: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] == other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] == other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] == other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] == other.as_array()[7] { -1 } else { 0 },
])
Avx2
- VPCMPEQD ymm, ymm, ymm
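For example, a minimal sketch showing the all-ones/all-zeros mask produced per lane:
let a = I32x8::from([1, 2, 3, 4, 5, 6, 7, 8]);
let b = I32x8::from([1, 0, 3, 0, 5, 0, 7, 0]);
let mask = a.cmp_eq(b);
assert_eq!(mask.as_array(), [-1, 0, -1, 0, -1, 0, -1, 0]);
// The mask combines with the bitwise operators, e.g. a & mask keeps only the equal lanes.
let kept = a & mask;
assert_eq!(kept.as_array(), [1, 0, 3, 0, 5, 0, 7, 0]);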
fn and_not(&self, other: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
])
Avx2
- VPANDN ymm, ymm, ymm
fn cmp_gt(&self, other: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] > other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] > other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] > other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] > other.as_array()[7] { -1 } else { 0 },
])
Avx2
- VPCMPGTD ymm, ymm, ymm
fn shift_left<const BITS: usize>(&self) -> I32x8
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
I32x8::from(out)
Avx2
- VPSLLD ymm, ymm, imm8
fn shift_right<const BITS: usize>(&self) -> I32x8
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
I32x8::from(out)
Avx2
- VPSRAD ymm, ymm, imm8
fn unpack_lo(&self, other: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
// Lane# 1
self.as_array()[4],
other.as_array()[4],
self.as_array()[5],
other.as_array()[5],
])
Avx2
- VPUNPCKLDQ ymm, ymm, ymm
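For example, a minimal sketch (interleaving happens within each 128-bit lane, not across the whole vector):
let a = I32x8::from([0, 1, 2, 3, 4, 5, 6, 7]);
let b = I32x8::from([10, 11, 12, 13, 14, 15, 16, 17]);
assert_eq!(a.unpack_lo(b).as_array(), [0, 10, 1, 11, 4, 14, 5, 15]);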
fn unpack_hi(&self, other: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
// Lane# 0
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
// Lane# 1
self.as_array()[6],
other.as_array()[6],
self.as_array()[7],
other.as_array()[7],
])
Avx2
- VPUNPCKHDQ ymm, ymm, ymm
fn max(&self, other: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
])
Avx2
- VPMAXSD ymm, ymm, ymm
fn min(&self, other: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
])
Avx2
- VPMINSD ymm, ymm, ymm
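For example, max and min can be combined to clamp each lane to a range:
let v = I32x8::from([-50, -5, 0, 5, 50, 500, -500, 7]);
let lo = I32x8::from([-10; 8]);
let hi = I32x8::from([10; 8]);
let clamped = v.max(lo).min(hi); // clamp every lane to [-10, 10]
assert_eq!(clamped.as_array(), [-10, -5, 0, 5, 10, 10, -10, 7]);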
const ZERO: Self = _
type BroadcastLoInput = I32x4
impl SimdBase32 for I32x8
fn shuffle<const I3: usize, const I2: usize, const I1: usize, const I0: usize>(&self) -> I32x8
Scalar Equivalent:
I32x8::from([
// 128-bit Lane #0
self.as_array()[I0 + 0 * 4],
self.as_array()[I1 + 0 * 4],
self.as_array()[I2 + 0 * 4],
self.as_array()[I3 + 0 * 4],
// 128-bit Lane #1
self.as_array()[I0 + 1 * 4],
self.as_array()[I1 + 1 * 4],
self.as_array()[I2 + 1 * 4],
self.as_array()[I3 + 1 * 4],
])
Avx2
- VPSHUFD ymm, ymm, imm8
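For example, a minimal sketch (the const indices are given as I3, I2, I1, I0, and the same permutation is applied to both 128-bit lanes):
let v = I32x8::from([0, 1, 2, 3, 4, 5, 6, 7]);
// I3 = 0, I2 = 1, I1 = 2, I0 = 3 reverses each 128-bit lane.
let rev = v.shuffle::<0, 1, 2, 3>();
assert_eq!(rev.as_array(), [3, 2, 1, 0, 7, 6, 5, 4]);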
impl SimdBase8x for I32x8
fn blend<const B7: bool, const B6: bool, const B5: bool, const B4: bool, const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: I32x8
) -> I32x8
Scalar Equivalent:
I32x8::from([
(if B0 { if_true } else { *self }).as_array()[0],
(if B1 { if_true } else { *self }).as_array()[1],
(if B2 { if_true } else { *self }).as_array()[2],
(if B3 { if_true } else { *self }).as_array()[3],
(if B4 { if_true } else { *self }).as_array()[4],
(if B5 { if_true } else { *self }).as_array()[5],
(if B6 { if_true } else { *self }).as_array()[6],
(if B7 { if_true } else { *self }).as_array()[7],
])
Avx2
- VPBLENDD ymm, ymm, ymm, imm8
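For example, a minimal sketch (B0 controls lane 0 through B7 controlling lane 7; true picks the lane from if_true):
let base = I32x8::from([0; 8]);
let ones = I32x8::from([1; 8]);
// Take lanes 0 and 7 from `ones`, everything else from `base`.
let mixed = base.blend::<true, false, false, false, false, false, false, true>(ones);
assert_eq!(mixed.as_array(), [1, 0, 0, 0, 0, 0, 0, 1]);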
impl SimdBaseGatherable<I32x8> for I32x8
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
base.offset(indices.as_array()[4] as isize).read_unaligned(),
base.offset(indices.as_array()[5] as isize).read_unaligned(),
base.offset(indices.as_array()[6] as isize).read_unaligned(),
base.offset(indices.as_array()[7] as isize).read_unaligned(),
])
Avx2
- VPGATHERDD ymm, vm32x, ymm
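For example, a minimal sketch of gathering eight values from in-bounds offsets:
let data: Vec<i32> = (0..100).collect();
let indices = I32x8::from([0, 5, 10, 15, 20, 25, 30, 35]);
// SAFETY: every index is in bounds for `data`, so each base + index is readable.
let gathered = unsafe { I32x8::gather(data.as_ptr(), indices) };
assert_eq!(gathered.as_array(), [0, 5, 10, 15, 20, 25, 30, 35]);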
unsafe fn gather_masked(
    base: *const i32,
    indices: I32x8,
    mask: I32x8,
    src: I32x8
) -> I32x8
Scalar Equivalent:
I32x8::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
if ((mask.as_array()[4] as u32) >> 31) == 1 {
base.offset(indices.as_array()[4] as isize).read_unaligned()
} else {
src.as_array()[4]
},
if ((mask.as_array()[5] as u32) >> 31) == 1 {
base.offset(indices.as_array()[5] as isize).read_unaligned()
} else {
src.as_array()[5]
},
if ((mask.as_array()[6] as u32) >> 31) == 1 {
base.offset(indices.as_array()[6] as isize).read_unaligned()
} else {
src.as_array()[6]
},
if ((mask.as_array()[7] as u32) >> 31) == 1 {
base.offset(indices.as_array()[7] as isize).read_unaligned()
} else {
src.as_array()[7]
},
])
Avx2
- VPGATHERDD ymm, vm32x, ymm
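For example, a minimal sketch (a lane is loaded only when the top bit of its mask lane is set; otherwise it is copied from src):
let data: Vec<i32> = (100..200).collect();
let indices = I32x8::from([0, 1, 2, 3, 4, 5, 6, 7]);
let mask = I32x8::from([-1, 0, -1, 0, -1, 0, -1, 0]); // -1 has its top bit set
let fallback = I32x8::from([7; 8]);
// SAFETY: every index selected by the mask is in bounds for `data`.
let out = unsafe { I32x8::gather_masked(data.as_ptr(), indices, mask, fallback) };
assert_eq!(out.as_array(), [100, 7, 102, 7, 104, 7, 106, 7]);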
impl SimdBaseGatherable<I32x8> for U32x8
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u32, indices: I32x8) -> U32x8
Scalar Equivalent:
U32x8::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
base.offset(indices.as_array()[4] as isize).read_unaligned(),
base.offset(indices.as_array()[5] as isize).read_unaligned(),
base.offset(indices.as_array()[6] as isize).read_unaligned(),
base.offset(indices.as_array()[7] as isize).read_unaligned(),
])
Avx2
- VPGATHERDD ymm, vm32x, ymm
unsafe fn gather_masked(
    base: *const u32,
    indices: I32x8,
    mask: U32x8,
    src: U32x8
) -> U32x8
Scalar Equivalent:
U32x8::from([
if (mask.as_array()[0] >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
if (mask.as_array()[4] >> 31) == 1 {
base.offset(indices.as_array()[4] as isize).read_unaligned()
} else {
src.as_array()[4]
},
if (mask.as_array()[5] >> 31) == 1 {
base.offset(indices.as_array()[5] as isize).read_unaligned()
} else {
src.as_array()[5]
},
if (mask.as_array()[6] >> 31) == 1 {
base.offset(indices.as_array()[6] as isize).read_unaligned()
} else {
src.as_array()[6]
},
if (mask.as_array()[7] >> 31) == 1 {
base.offset(indices.as_array()[7] as isize).read_unaligned()
} else {
src.as_array()[7]
},
])
Avx2
- VPGATHERDD ymm, vm32x, ymm
impl Sub<I32x8> for I32x8
fn sub(self, rhs: I32x8) -> I32x8
Scalar Equivalent:
I32x8::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
])
Avx2
- VPSUBD ymm, ymm, ymm
impl SubAssign<I32x8> for I32x8
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for I32x8
impl Eq for I32x8
impl Pod for I32x8
Auto Trait Implementations
impl RefUnwindSafe for I32x8
impl Send for I32x8
impl Sync for I32x8
impl Unpin for I32x8
impl UnwindSafe for I32x8
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T where T: AnyBitPattern
type Bits = T
Self must have the same layout as the specified Bits, except for the possible invalid bit patterns being checked during is_valid_bit_pattern.