pub struct I64x4(/* private fields */);
[i64; 4] as a vector.
Implementations§
impl I64x4
pub const fn from_array(arr: [i64; 4]) -> Self
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
§Example
const MY_EXTREMELY_FUN_VALUE: I64x4 = I64x4::from_array([0, 1, 2, 3]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
    assert_eq!(i as i64, value);
}
Trait Implementations§
impl Add for I64x4
fn add(self, rhs: I64x4) -> I64x4
Perform a pairwise wrapping_add
§Scalar Equivalent
I64x4::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
])
§AVX2 Intrinsics Used
_mm256_add_epi64 (VPADDQ ymm, ymm, ymm)
§Neon Intrinsics Used
vaddq_s64
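A minimal usage sketch (not part of the original docs), with hypothetical lane values and assuming as_array returns the lanes as [i64; 4]:

let a = I64x4::from([1, 2, i64::MAX, -1]);
let b = I64x4::from([10, 20, 1, 1]);
// Lane-wise wrapping addition: i64::MAX + 1 wraps around to i64::MIN.
assert_eq!((a + b).as_array(), [11, 22, i64::MIN, 0]);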
impl AddAssign for I64x4
fn add_assign(&mut self, other: I64x4)
Performs the += operation.
impl BitAnd for I64x4
fn bitand(self, rhs: I64x4) -> I64x4
Perform a pairwise bitwise and
§Scalar Equivalent
I64x4::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
])
§AVX2 Intrinsics Used
_mm256_and_si256 (VPAND ymm, ymm, ymm)
§Neon Intrinsics Used
vandq_s64
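A brief sketch with hypothetical lane values, using the same From/as_array API as the example above:

let a = I64x4::from([0b1100, 0b1010, -1, 0]);
let m = I64x4::from([0b1010, 0b1010, 7, -1]);
// Each lane is ANDed independently; an all-ones lane (-1) leaves the other operand unchanged.
assert_eq!((a & m).as_array(), [0b1000, 0b1010, 7, 0]);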
impl BitAndAssign for I64x4
fn bitand_assign(&mut self, other: I64x4)
Performs the &= operation.
impl BitOr for I64x4
fn bitor(self, rhs: I64x4) -> I64x4
Perform a pairwise bitwise or
§Scalar Equivalent
I64x4::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
])
§AVX2 Intrinsics Used
_mm256_or_si256 (VPOR ymm, ymm, ymm)
§Neon Intrinsics Used
vorrq_s64
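A brief sketch with hypothetical lane values:

let a = I64x4::from([0b0101, 0, -1, 0]);
let b = I64x4::from([0b0011, 9, 0, 0]);
// Each lane is ORed independently.
assert_eq!((a | b).as_array(), [0b0111, 9, -1, 0]);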
impl BitOrAssign for I64x4
fn bitor_assign(&mut self, other: I64x4)
Performs the |= operation.
impl BitXor for I64x4
fn bitxor(self, rhs: I64x4) -> I64x4
Perform a pairwise bitwise xor
§Scalar Equivalent
I64x4::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
])
§AVX2 Intrinsics Used
_mm256_xor_si256 (VPXOR ymm, ymm, ymm)
§Neon Intrinsics Used
veorq_s64
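A brief sketch with hypothetical lane values:

let a = I64x4::from([0b0101, 1, -1, 7]);
let b = I64x4::from([0b0011, 1, 0, 7]);
// XOR of equal lanes is zero; XOR with zero is the identity.
assert_eq!((a ^ b).as_array(), [0b0110, 0, -1, 0]);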
impl BitXorAssign for I64x4
fn bitxor_assign(&mut self, other: I64x4)
Performs the ^= operation.
impl ConditionallySelectable for I64x4
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap self and other if choice == 1; otherwise, reassign both unto themselves.
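A constant-time selection sketch (hypothetical values), assuming subtle's Choice and the ConditionallySelectable trait are in scope:

use subtle::Choice;

let a = I64x4::from([1, 2, 3, 4]);
let b = I64x4::from([5, 6, 7, 8]);
// choice == 0 keeps `a`; choice == 1 selects `b`, without data-dependent branching.
assert_eq!(I64x4::conditional_select(&a, &b, Choice::from(0u8)).as_array(), [1, 2, 3, 4]);
assert_eq!(I64x4::conditional_select(&a, &b, Choice::from(1u8)).as_array(), [5, 6, 7, 8]);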
impl ConstantTimeEq for I64x4
impl<'de> Deserialize<'de> for I64x4
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,
impl Distribution<I64x4> for Standard
impl ExtendingCast<I16x8> for I64x4
impl ExtendingCast<I32x4> for I64x4
impl ExtendingCast<I8x16> for I64x4
impl From<I64x2> for I64x4
fn from(vector: I64x2) -> I64x4
impl ShlAssign<u64> for I64x4
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl ShlAssign for I64x4
fn shl_assign(&mut self, amount: I64x4)
Performs the <<= operation.
impl Shr<u64> for I64x4
fn shr(self, amount: u64) -> I64x4
§Scalar Equivalent:
if amount >= 64 {
    let mut out = self.as_array();
    for x in out.iter_mut() {
        *x = if *x < 0 { -1 } else { 0 };
    }
    I64x4::from(out)
} else {
    I64x4::from([
        self.as_array()[0] >> amount,
        self.as_array()[1] >> amount,
        self.as_array()[2] >> amount,
        self.as_array()[3] >> amount,
    ])
}
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
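A short sketch (hypothetical values) of the sign-propagating behaviour described by the scalar equivalent above:

let v = I64x4::from([-8, 8, -1, 1]);
// Arithmetic shift: negative lanes are sign-filled.
assert_eq!((v >> 1u64).as_array(), [-4, 4, -1, 0]);
// Shift amounts of 64 or more saturate each lane to its sign (0 or -1).
assert_eq!((v >> 64u64).as_array(), [-1, 0, -1, 0]);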
impl Shr for I64x4
fn shr(self, amount: I64x4) -> I64x4
§Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
    *x = if (0..64).contains(&amm) {
        *x >> amm
    } else if *x < 0 {
        -1
    } else {
        0
    };
}
I64x4::from(out)
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
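A per-lane sketch (hypothetical values) matching the scalar equivalent above:

let v = I64x4::from([-8, 8, -8, 8]);
let amounts = I64x4::from([1, 1, 64, 64]);
// Lanes 2 and 3 use an out-of-range amount, so they collapse to their sign.
assert_eq!((v >> amounts).as_array(), [-4, 4, -1, 0]);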
impl ShrAssign<u64> for I64x4
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl ShrAssign for I64x4
fn shr_assign(&mut self, amount: I64x4)
Performs the >>= operation.
impl SimdBase for I64x4
fn is_zero(&self) -> bool
fn set_lo(scalar: i64) -> I64x4
fn broadcast_lo(vector: I64x2) -> I64x4
fn cmp_eq(&self, other: I64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
])
§Avx2
- VPCMPEQQ ymm, ymm, ymm
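A brief sketch with hypothetical lane values:

let a = I64x4::from([1, 2, 3, 4]);
let b = I64x4::from([1, 0, 3, 0]);
// Equal lanes become all ones (-1); unequal lanes become 0.
assert_eq!(a.cmp_eq(b).as_array(), [-1, 0, -1, 0]);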
fn and_not(&self, other: I64x4) -> I64x4
fn cmp_gt(&self, other: I64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
])
§Avx2
- VPCMPGTQ ymm, ymm, ymm
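A brief sketch with hypothetical lane values:

let a = I64x4::from([5, -1, 0, 7]);
let b = I64x4::from([3, 0, 0, 9]);
// Signed greater-than per lane: -1 where true, 0 where false.
assert_eq!(a.cmp_gt(b).as_array(), [-1, 0, 0, 0]);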
fn shift_left<const BITS: usize>(&self) -> I64x4
fn shift_right<const BITS: usize>(&self) -> I64x4
fn unpack_lo(&self, other: I64x4) -> I64x4
fn unpack_hi(&self, other: I64x4) -> I64x4
fn max(&self, other: I64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
])
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn min(&self, other: I64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
])
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
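A combined sketch for max and min with hypothetical lane values:

let a = I64x4::from([1, -5, 7, 0]);
let b = I64x4::from([2, -6, 3, 0]);
assert_eq!(a.max(b).as_array(), [2, -5, 7, 0]);
assert_eq!(a.min(b).as_array(), [1, -6, 3, 0]);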
type BroadcastLoInput = I64x2
[Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())]
impl SimdBase4x for I64x4
fn blend<const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: I64x4,
) -> I64x4
impl SimdBase4x64 for I64x4
impl SimdBase64 for I64x4
fn mul_lo(&self, other: I64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
((self.as_array()[0] as i32) as i64) * ((other.as_array()[0] as i32) as i64),
((self.as_array()[1] as i32) as i64) * ((other.as_array()[1] as i32) as i64),
((self.as_array()[2] as i32) as i64) * ((other.as_array()[2] as i32) as i64),
((self.as_array()[3] as i32) as i64) * ((other.as_array()[3] as i32) as i64),
])
§Avx2
- VPMULDQ ymm, ymm, ymm
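A sketch (hypothetical values) of the semantics shown in the scalar equivalent: only the sign-extended low 32 bits of each lane are multiplied, so high bits of the inputs are ignored:

let a = I64x4::from([3, -2, (1 << 40) | 5, 10]);
let b = I64x4::from([4, 7, 6, -10]);
// Lane 2: the 1 << 40 part is discarded, leaving 5 * 6 = 30.
assert_eq!(a.mul_lo(b).as_array(), [12, -14, 30, -100]);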
impl SimdBaseGatherable<I32x4> for I64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: I32x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERDQ ymm, vm32x, ymm
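A gather sketch with hypothetical data, assuming I32x4 offers the same From<[i32; 4]> constructor; the fully qualified call disambiguates between the several SimdBaseGatherable impls:

let data: [i64; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
let indices = I32x4::from([0, 2, 4, 6]);
// SAFETY: every index points inside `data`, so each unaligned read is valid.
let gathered = unsafe { <I64x4 as SimdBaseGatherable<I32x4>>::gather(data.as_ptr(), indices) };
assert_eq!(gathered.as_array(), [10, 12, 14, 16]);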
unsafe fn gather_masked(
    base: *const i64,
    indices: I32x4,
    mask: I64x4,
    src: I64x4,
) -> I64x4
§Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERDQ ymm, vm32x, ymm
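A masked-gather sketch with hypothetical data; per the scalar equivalent, the top bit of each mask lane decides whether to load from memory or keep the corresponding lane of src:

let data: [i64; 4] = [100, 200, 300, 400];
let indices = I32x4::from([3, 2, 1, 0]);
let mask = I64x4::from([-1, 0, -1, 0]); // top bit set: load; otherwise keep `src`
let src = I64x4::from([7, 7, 7, 7]);
// SAFETY: every index points inside `data`.
let out = unsafe {
    <I64x4 as SimdBaseGatherable<I32x4>>::gather_masked(data.as_ptr(), indices, mask, src)
};
assert_eq!(out.as_array(), [400, 7, 200, 7]);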
impl SimdBaseGatherable<I64x4> for I32x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: I64x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const i32,
    indices: I64x4,
    mask: I32x4,
    src: I32x4,
) -> I32x4
§Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<I64x4> for I64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: I64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const i64,
    indices: I64x4,
    mask: I64x4,
    src: I64x4,
) -> I64x4
§Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl SimdBaseGatherable<I64x4> for U32x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u32, indices: I64x4) -> U32x4
§Scalar Equivalent:
U32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const u32,
    indices: I64x4,
    mask: U32x4,
    src: U32x4,
) -> U32x4
§Scalar Equivalent:
U32x4::from([
if (mask.as_array()[0] >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<I64x4> for U64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const u64,
    indices: I64x4,
    mask: U64x4,
    src: U64x4,
) -> U64x4
§Scalar Equivalent:
U64x4::from([
if (mask.as_array()[0] >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl SimdBaseGatherable<U64x4> for I64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: U64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const i64,
    indices: U64x4,
    mask: I64x4,
    src: I64x4,
) -> I64x4
§Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl Sub for I64x4
fn sub(self, rhs: I64x4) -> I64x4
Perform a pairwise wrapping_sub
§Scalar Equivalent
I64x4::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
])
§AVX2 Intrinsics Used
_mm256_sub_epi64 (VPSUBQ ymm, ymm, ymm)
§Neon Intrinsics Used
vsubq_s64
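A minimal sketch (hypothetical values) of the wrapping behaviour:

let a = I64x4::from([5, 0, i64::MIN, 10]);
let b = I64x4::from([3, 7, 1, 10]);
// Lane-wise wrapping subtraction: i64::MIN - 1 wraps around to i64::MAX.
assert_eq!((a - b).as_array(), [2, -7, i64::MAX, 0]);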
impl SubAssign for I64x4
fn sub_assign(&mut self, other: I64x4)
Performs the -= operation.
impl Copy for I64x4
impl Eq for I64x4
impl Pod for I64x4
Auto Trait Implementations§
impl Freeze for I64x4
impl RefUnwindSafe for I64x4
impl Send for I64x4
impl Sync for I64x4
impl Unpin for I64x4
impl UnwindSafe for I64x4
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits except for
the possible invalid bit patterns being checked during
is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits
as &Self.