Struct vectoreyes::I64x4
#[repr(transparent)]
pub struct I64x4(_);
[i64; 4] as a vector.
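A minimal construction sketch (assuming the From<[i64; 4]> conversion and the as_array accessor used in the scalar equivalents below):

use vectoreyes::I64x4;

let v = I64x4::from([1, 2, 3, 4]);
assert_eq!(v.as_array(), [1, 2, 3, 4]);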
Implementations
Trait Implementations
impl Add<I64x4> for I64x4
fn add(self, rhs: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
])
Avx2
- VPADDQ ymm, ymm, ymm
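A usage sketch for lane-wise addition (wrapping semantics, per the scalar equivalent above):

use vectoreyes::I64x4;

let a = I64x4::from([1, 2, 3, i64::MAX]);
let b = I64x4::from([10, 20, 30, 1]);
// Each lane wraps on overflow, so i64::MAX + 1 becomes i64::MIN.
assert_eq!((a + b).as_array(), [11, 22, 33, i64::MIN]);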
impl AddAssign<I64x4> for I64x4
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<I64x4> for I64x4
impl BitAndAssign<I64x4> for I64x4
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<I64x4> for I64x4
impl BitOrAssign<I64x4> for I64x4
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<I64x4> for I64x4
impl BitXorAssign<I64x4> for I64x4
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for I64x4
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
impl ConstantTimeEq for I64x4
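A constant-time selection sketch (assuming Choice and these traits come from the subtle crate, which defines ConditionallySelectable and ConstantTimeEq):

use subtle::{Choice, ConditionallySelectable, ConstantTimeEq};
use vectoreyes::I64x4;

let a = I64x4::from([1, 2, 3, 4]);
let b = I64x4::from([5, 6, 7, 8]);
// Pick b when the choice is 1, without branching on secret data.
let picked = I64x4::conditional_select(&a, &b, Choice::from(1u8));
assert_eq!(picked.as_array(), [5, 6, 7, 8]);
// ct_eq compares the whole vector and yields a Choice.
assert!(bool::from(a.ct_eq(&a)));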
impl ExtendingCast<I16x8> for I64x4
fn extending_cast_from(vector: I16x8) -> I64x4
Scalar Equivalent:
I64x4::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
i64::from(vector.as_array()[2]),
i64::from(vector.as_array()[3]),
])
Avx2
- VPMOVSXWQ ymm, xmm
impl ExtendingCast<I32x4> for I64x4
fn extending_cast_from(vector: I32x4) -> I64x4
Scalar Equivalent:
I64x4::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
i64::from(vector.as_array()[2]),
i64::from(vector.as_array()[3]),
])
Avx2
- VPMOVSXDQ ymm, xmm
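A sign-extension sketch (assuming the ExtendingCast trait is in scope):

use vectoreyes::{ExtendingCast, I32x4, I64x4};

let narrow = I32x4::from([-1, 2, -3, 4]);
// Each 32-bit lane is sign-extended to a 64-bit lane.
let wide = I64x4::extending_cast_from(narrow);
assert_eq!(wide.as_array(), [-1, 2, -3, 4]);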
impl ExtendingCast<I8x16> for I64x4
fn extending_cast_from(vector: I8x16) -> I64x4
Scalar Equivalent:
I64x4::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
i64::from(vector.as_array()[2]),
i64::from(vector.as_array()[3]),
])
Avx2
- VPMOVSXBQ ymm, xmm
impl From<[I64x2; 2]> for I64x4
impl From<I32x4> for I64x4
impl From<I64x2> for I64x4
fn from(vector: I64x2) -> I64x4
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 4];
out[0..2].copy_from_slice(&vector.as_array());
I64x4::from(out)
Avx2
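A widening-conversion sketch (assuming I64x2 supports From<[i64; 2]>):

use vectoreyes::{I64x2, I64x4};

let lo = I64x2::from([7, 8]);
// The two lanes land in the low half; the upper two lanes are zeroed.
let widened = I64x4::from(lo);
assert_eq!(widened.as_array(), [7, 8, 0, 0]);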
impl From<I64x4> for [I64x2; 2]
impl Shl<I64x4> for I64x4
fn shl(self, amount: I64x4) -> I64x4
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 64 || amm < 0 {
0
} else {
*x << amm
};
}
I64x4::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl Shl<u64> for I64x4
fn shl(self, amount: u64) -> I64x4
Scalar Equivalent:
if amount >= 64 {
I64x4::ZERO
} else {
I64x4::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
])
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
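A scalar-shift sketch matching the behavior above:

use vectoreyes::I64x4;

let v = I64x4::from([1, 2, 3, 4]);
// The same shift amount applies to every lane.
assert_eq!((v << 2_u64).as_array(), [4, 8, 12, 16]);
// Shifting by 64 or more produces zero rather than panicking.
assert_eq!((v << 64_u64).as_array(), [0, 0, 0, 0]);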
impl ShlAssign<I64x4> for I64x4
fn shl_assign(&mut self, amount: I64x4)
Performs the <<= operation.
impl ShlAssign<u64> for I64x4
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<I64x4> for I64x4
fn shr(self, amount: I64x4) -> I64x4
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 64 || amm < 0 {
if *x < 0 { -1 } else { 0 }
} else {
*x >> amm
};
}
I64x4::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl Shr<u64> for I64x4
fn shr(self, amount: u64) -> I64x4
Scalar Equivalent:
if amount >= 64 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I64x4::from(out)
} else {
I64x4::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
])
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
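An arithmetic-shift sketch matching the behavior above:

use vectoreyes::I64x4;

let v = I64x4::from([-8, 8, -1, 1]);
// The right shift is arithmetic: negative lanes keep their sign bit.
assert_eq!((v >> 2_u64).as_array(), [-2, 2, -1, 0]);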
impl ShrAssign<I64x4> for I64x4
fn shr_assign(&mut self, amount: I64x4)
Performs the >>= operation.
impl ShrAssign<u64> for I64x4
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for I64x4
fn set_lo(scalar: i64) -> I64x4
Scalar Equivalent:
let mut out = [0; 4];
out[0] = scalar;
I64x4::from(out)
Avx2
- Instruction sequence.
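A set_lo sketch (assuming the SimdBase trait is in scope):

use vectoreyes::{I64x4, SimdBase};

// The scalar goes into lane 0; all other lanes are zeroed.
let v = I64x4::set_lo(9);
assert_eq!(v.as_array(), [9, 0, 0, 0]);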
fn broadcast_lo(vector: I64x2) -> I64x4
fn cmp_eq(&self, other: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
])
Avx2
- VPCMPEQQ ymm, ymm, ymm
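A comparison sketch (assuming the SimdBase trait is in scope):

use vectoreyes::{I64x4, SimdBase};

let a = I64x4::from([1, 2, 3, 4]);
let b = I64x4::from([1, 0, 3, 0]);
// Equal lanes become all-ones (-1); unequal lanes become 0.
assert_eq!(a.cmp_eq(b).as_array(), [-1, 0, -1, 0]);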
fn and_not(&self, other: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
])
Avx2
- VPANDN ymm, ymm, ymm
fn cmp_gt(&self, other: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
])
Avx2
- VPCMPGTQ ymm, ymm, ymm
fn shift_left<const BITS: usize>(&self) -> I64x4
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
I64x4::from(out)
Avx2
- VPSLLQ ymm, ymm, imm8
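A compile-time shift sketch (assuming the SimdBase trait is in scope):

use vectoreyes::{I64x4, SimdBase};

let v = I64x4::from([1, 2, 3, 4]);
// The shift amount is a const generic, so it can lower to an immediate operand.
assert_eq!(v.shift_left::<3>().as_array(), [8, 16, 24, 32]);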
fn shift_right<const BITS: usize>(&self) -> I64x4
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
I64x4::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn unpack_lo(&self, other: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
// Lane# 1
self.as_array()[2],
other.as_array()[2],
])
Avx2
- VPUNPCKLQDQ ymm, ymm, ymm
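An interleaving sketch (assuming the SimdBase trait is in scope); note that lanes are interleaved within each 128-bit half, as in the scalar equivalent above:

use vectoreyes::{I64x4, SimdBase};

let a = I64x4::from([10, 11, 12, 13]);
let b = I64x4::from([20, 21, 22, 23]);
// Low elements of each 128-bit half are interleaved: [a0, b0, a2, b2].
assert_eq!(a.unpack_lo(b).as_array(), [10, 20, 12, 22]);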
fn unpack_hi(&self, other: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
// Lane# 0
self.as_array()[1],
other.as_array()[1],
// Lane# 1
self.as_array()[3],
other.as_array()[3],
])
Avx2
- VPUNPCKHQDQ ymm, ymm, ymm
fn max(&self, other: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
])
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn min(&self, other: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
])
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
const ZERO: Self = _
type BroadcastLoInput = I64x2
impl SimdBase4x for I64x4
fn blend<const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: I64x4
) -> I64x4
Scalar Equivalent:
I64x4::from([
(if B0 { if_true } else { *self }).as_array()[0],
(if B1 { if_true } else { *self }).as_array()[1],
(if B2 { if_true } else { *self }).as_array()[2],
(if B3 { if_true } else { *self }).as_array()[3],
])
Avx2
- VPBLENDD ymm, ymm, ymm, imm8
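A blend sketch (assuming the SimdBase4x trait is in scope); the const parameters B3..B0 decide, per lane, whether the lane comes from if_true:

use vectoreyes::{I64x4, SimdBase4x};

let base = I64x4::from([0, 0, 0, 0]);
let other = I64x4::from([1, 2, 3, 4]);
// B0 and B3 are true, so lanes 0 and 3 come from `other`.
assert_eq!(base.blend::<true, false, false, true>(other).as_array(), [1, 0, 0, 4]);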
impl SimdBase4x64 for I64x4
impl SimdBase64 for I64x4
fn mul_lo(&self, other: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
((self.as_array()[0] as i32) as i64) * ((other.as_array()[0] as i32) as i64),
((self.as_array()[1] as i32) as i64) * ((other.as_array()[1] as i32) as i64),
((self.as_array()[2] as i32) as i64) * ((other.as_array()[2] as i32) as i64),
((self.as_array()[3] as i32) as i64) * ((other.as_array()[3] as i32) as i64),
])
Avx2
- VPMULDQ ymm, ymm, ymm
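A mul_lo sketch (assuming the SimdBase64 trait is in scope); only the low 32 bits of each lane participate, per the scalar equivalent above:

use vectoreyes::{I64x4, SimdBase64};

let a = I64x4::from([3, -4, (1_i64 << 32) + 5, 6]);
let b = I64x4::from([2, 2, -2, 10]);
// Lane 2 shows the truncation: only the low 32 bits (5) are multiplied.
assert_eq!(a.mul_lo(b).as_array(), [6, -8, -10, 60]);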
impl SimdBaseGatherable<I32x4> for I64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: I32x4) -> I64x4
Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERDQ ymm, vm32x, ymm
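A gather sketch (assuming the SimdBaseGatherable trait is in scope):

use vectoreyes::{I32x4, I64x4, SimdBaseGatherable};

let table: [i64; 8] = [0, 10, 20, 30, 40, 50, 60, 70];
let indices = I32x4::from([1, 3, 5, 7]);
// Safety: every index stays within `table`, so each unaligned read is valid.
let gathered = unsafe { I64x4::gather(table.as_ptr(), indices) };
assert_eq!(gathered.as_array(), [10, 30, 50, 70]);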
unsafe fn gather_masked(
    base: *const i64,
    indices: I32x4,
    mask: I64x4,
    src: I64x4
) -> I64x4
Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERDQ ymm, vm32x, ymm
impl SimdBaseGatherable<I64x4> for I32x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: I64x4) -> I32x4
Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const i32,
    indices: I64x4,
    mask: I32x4,
    src: I32x4
) -> I32x4
Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<I64x4> for I64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const i64,
    indices: I64x4,
    mask: I64x4,
    src: I64x4
) -> I64x4
Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl SimdBaseGatherable<I64x4> for U32x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u32, indices: I64x4) -> U32x4
Scalar Equivalent:
U32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const u32,
    indices: I64x4,
    mask: U32x4,
    src: U32x4
) -> U32x4
Scalar Equivalent:
U32x4::from([
if (mask.as_array()[0] >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<I64x4> for U64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const u64,
    indices: I64x4,
    mask: U64x4,
    src: U64x4
) -> U64x4
Scalar Equivalent:
U64x4::from([
if (mask.as_array()[0] >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl SimdBaseGatherable<U64x4> for I64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: U64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const i64,
    indices: U64x4,
    mask: I64x4,
    src: I64x4
) -> I64x4
Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl Sub<I64x4> for I64x4
fn sub(self, rhs: I64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
])
Avx2
- VPSUBQ ymm, ymm, ymm
impl SubAssign<I64x4> for I64x4
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for I64x4
impl Eq for I64x4
impl Pod for I64x4
Auto Trait Implementations
impl RefUnwindSafe for I64x4
impl Send for I64x4
impl Sync for I64x4
impl Unpin for I64x4
impl UnwindSafe for I64x4
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T where T: AnyBitPattern
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.