pub struct U64x4(/* private fields */);
[u64; 4] as a vector.
Implementations
impl U64x4
pub const fn from_array(arr: [u64; 4]) -> Self
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
§Example
const MY_EXTREMELY_FUN_VALUE: U64x4 = U64x4::from_array([0, 1, 2, 3]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as u64, value);
}
Trait Implementations
impl Add for U64x4
fn add(self, rhs: U64x4) -> U64x4
Perform a pairwise wrapping_add
§Scalar Equivalent
U64x4::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
])
§AVX2 Intrinsics Used
- _mm256_add_epi64 (VPADDQ ymm, ymm, ymm)
§Neon Intrinsics Used
- vaddq_u64
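A quick illustrative sketch (not from the crate's own docs): the addition wraps on overflow instead of panicking, lane by lane.
let a = U64x4::from_array([u64::MAX, 1, 2, 3]);
let b = U64x4::from_array([1, 10, 20, 30]);
// Lane 0 wraps: u64::MAX.wrapping_add(1) == 0. No lane affects its neighbors.
assert_eq!(a + b, U64x4::from_array([0, 11, 22, 33]));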
impl AddAssign for U64x4
fn add_assign(&mut self, other: U64x4)
Performs the += operation.
impl BitAnd for U64x4
fn bitand(self, rhs: U64x4) -> U64x4
Perform a pairwise bitwise and
§Scalar Equivalent
U64x4::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
])
§AVX2 Intrinsics Used
- _mm256_and_si256 (VPAND ymm, ymm, ymm)
§Neon Intrinsics Used
- vandq_u64
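An illustrative sketch: a broadcast mask keeps only the low 32 bits of every lane.
let mask = U64x4::from_array([0xFFFF_FFFF; 4]);
let x = U64x4::from_array([0x1111_2222_3333_4444, 0xAAAA_BBBB_CCCC_DDDD, 0, u64::MAX]);
// Each lane is ANDed independently with the corresponding mask lane.
assert_eq!(x & mask, U64x4::from_array([0x3333_4444, 0xCCCC_DDDD, 0, 0xFFFF_FFFF]));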
impl BitAndAssign for U64x4
fn bitand_assign(&mut self, other: U64x4)
Performs the &= operation.
impl BitOr for U64x4
fn bitor(self, rhs: U64x4) -> U64x4
Perform a pairwise bitwise or
§Scalar Equivalent
U64x4::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
])
§AVX2 Intrinsics Used
- _mm256_or_si256 (VPOR ymm, ymm, ymm)
§Neon Intrinsics Used
- vorrq_u64
impl BitOrAssign for U64x4
fn bitor_assign(&mut self, other: U64x4)
Performs the |= operation.
impl BitXor for U64x4
fn bitxor(self, rhs: U64x4) -> U64x4
Perform a pairwise bitwise xor
§Scalar Equivalent
U64x4::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
])
§AVX2 Intrinsics Used
- _mm256_xor_si256 (VPXOR ymm, ymm, ymm)
§Neon Intrinsics Used
- veorq_u64
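A small sketch of the lanewise XOR identities (illustrative only):
let x = U64x4::from_array([1, 2, 3, 4]);
let y = U64x4::from_array([0b1010, 0b0110, 0, u64::MAX]);
// XORing twice with the same operand round-trips every lane...
assert_eq!((x ^ y) ^ y, x);
// ...and XORing a vector with itself clears it.
assert_eq!(x ^ x, U64x4::from_array([0; 4]));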
impl BitXorAssign for U64x4
fn bitxor_assign(&mut self, other: U64x4)
Performs the ^= operation.
impl ConditionallySelectable for U64x4
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap self and other if choice == 1; otherwise, reassign both unto themselves.
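A minimal constant-time selection sketch, assuming these traits come from the subtle crate (as the Choice parameter suggests): conditional_select returns a when choice is 0 and b when choice is 1, without a data-dependent branch.
use subtle::{Choice, ConditionallySelectable};

let a = U64x4::from_array([1, 2, 3, 4]);
let b = U64x4::from_array([5, 6, 7, 8]);
// Choice::from(1) selects b; Choice::from(0) would keep a.
let picked = U64x4::conditional_select(&a, &b, Choice::from(1u8));
assert_eq!(picked, b);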
impl ConstantTimeEq for U64x4
impl<'de> Deserialize<'de> for U64x4
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,
impl Distribution<U64x4> for Standard
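Because Standard can sample U64x4, a random vector can be drawn straight from any Rng; a sketch assuming the rand 0.8-style API:
use rand::Rng;

let mut rng = rand::thread_rng();
// Every lane is an independently sampled u64.
let v: U64x4 = rng.gen();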
impl ExtendingCast<U16x8> for U64x4
impl ExtendingCast<U32x4> for U64x4
impl ExtendingCast<U8x16> for U64x4
impl From<U64x2> for U64x4
fn from(vector: U64x2) -> U64x4
impl Shl<u64> for U64x4
impl Shl for U64x4
impl ShlAssign<u64> for U64x4
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl ShlAssign for U64x4
fn shl_assign(&mut self, amount: U64x4)
Performs the <<= operation.
impl Shr<u64> for U64x4
impl Shr for U64x4
impl ShrAssign<u64> for U64x4
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl ShrAssign for U64x4
fn shr_assign(&mut self, amount: U64x4)
Performs the >>= operation.
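A sketch of the two shift flavors (assuming the vector-shift overloads apply lane i's amount to lane i, as VPSLLVQ/VPSRLVQ do, and keeping all shift amounts below 64):
let x = U64x4::from_array([1, 2, 4, 8]);
// Scalar amount: every lane shifts left by the same 4 bits.
assert_eq!(x << 4u64, U64x4::from_array([16, 32, 64, 128]));
// Vector amount: lane i shifts right by amounts[i] bits.
let amounts = U64x4::from_array([0, 1, 2, 3]);
assert_eq!(x >> amounts, U64x4::from_array([1, 1, 1, 1]));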
impl SimdBase for U64x4
fn is_zero(&self) -> bool
fn set_lo(scalar: u64) -> U64x4
fn broadcast_lo(vector: U64x2) -> U64x4
fn cmp_eq(&self, other: U64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
if self.as_array()[0] == other.as_array()[0] { u64::MAX } else { 0 },
if self.as_array()[1] == other.as_array()[1] { u64::MAX } else { 0 },
if self.as_array()[2] == other.as_array()[2] { u64::MAX } else { 0 },
if self.as_array()[3] == other.as_array()[3] { u64::MAX } else { 0 },
])
§Avx2
- VPCMPEQQ ymm, ymm, ymm
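A usage sketch (assuming the SimdBase trait is in scope): equal lanes produce an all-ones mask and unequal lanes produce zero, which composes directly with the bitwise operators above.
let x = U64x4::from_array([1, 2, 3, 4]);
let y = U64x4::from_array([1, 0, 3, 0]);
// Lanes 0 and 2 match; lanes 1 and 3 do not.
assert_eq!(x.cmp_eq(y), U64x4::from_array([u64::MAX, 0, u64::MAX, 0]));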
fn and_not(&self, other: U64x4) -> U64x4
fn cmp_gt(&self, other: U64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
if self.as_array()[0] > other.as_array()[0] { u64::MAX } else { 0 },
if self.as_array()[1] > other.as_array()[1] { u64::MAX } else { 0 },
if self.as_array()[2] > other.as_array()[2] { u64::MAX } else { 0 },
if self.as_array()[3] > other.as_array()[3] { u64::MAX } else { 0 },
])
§Avx2
NOTE: this operation is not natively supported by AVX2; this implementation uses an efficient vector polyfill.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 63);
Self::from(I64x4::from(*self ^ sign_bit).cmp_gt(
I64x4::from(other ^ sign_bit)
))
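A scalar sketch (illustrative, not part of the crate) of why the sign-bit flip above is correct: XOR-ing both operands with 1 << 63 maps unsigned order monotonically onto signed order, so the signed comparison answers the unsigned question.
fn unsigned_gt_via_signed(a: u64, b: u64) -> bool {
    // 0 maps to i64::MIN and u64::MAX maps to i64::MAX, preserving order.
    ((a ^ (1 << 63)) as i64) > ((b ^ (1 << 63)) as i64)
}
assert!(unsigned_gt_via_signed(u64::MAX, 0));
assert!(!unsigned_gt_via_signed(0, u64::MAX));
assert!(unsigned_gt_via_signed(5, 3));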
fn shift_left<const BITS: usize>(&self) -> U64x4
fn shift_right<const BITS: usize>(&self) -> U64x4
fn unpack_lo(&self, other: U64x4) -> U64x4
fn unpack_hi(&self, other: U64x4) -> U64x4
fn max(&self, other: U64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
])
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn min(&self, other: U64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
])
§Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
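A lanewise sketch (assuming SimdBase is in scope); the results match scalar u64::max/u64::min per lane even though the AVX2 path falls back to scalar code.
let x = U64x4::from_array([1, 200, 3, u64::MAX]);
let y = U64x4::from_array([100, 2, 300, 0]);
assert_eq!(x.max(y), U64x4::from_array([100, 200, 300, u64::MAX]));
assert_eq!(x.min(y), U64x4::from_array([1, 2, 3, 0]));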
type BroadcastLoInput = U64x2
[Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())]
impl SimdBase4x for U64x4
fn blend<const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: U64x4,
) -> U64x4
impl SimdBase4x64 for U64x4
impl SimdBase64 for U64x4
fn mul_lo(&self, other: U64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
((self.as_array()[0] as u32) as u64) * ((other.as_array()[0] as u32) as u64),
((self.as_array()[1] as u32) as u64) * ((other.as_array()[1] as u32) as u64),
((self.as_array()[2] as u32) as u64) * ((other.as_array()[2] as u32) as u64),
((self.as_array()[3] as u32) as u64) * ((other.as_array()[3] as u32) as u64),
])
§Avx2
- VPMULUDQ ymm, ymm, ymm
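A sketch (assuming SimdBase64 is in scope) showing that only the low 32 bits of each lane participate, mirroring VPMULUDQ:
let x = U64x4::from_array([0xFFFF_FFFF_0000_0002, 3, 1 << 32, 10]);
let y = U64x4::from_array([5, 7, 1 << 32, 1 << 33]);
// Lane 0: only the low half (2) is multiplied by 5; lanes 2 and 3 have zero low halves.
assert_eq!(x.mul_lo(y), U64x4::from_array([10, 21, 0, 0]));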
impl SimdBaseGatherable<I32x4> for U64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I32x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERDQ ymm, vm32x, ymm
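A usage sketch (assuming SimdBaseGatherable is in scope and that I32x4 has the From<[i32; 4]> conversion used in the scalar equivalents): indices are element offsets from base, and the caller must uphold the §Safety requirements.
let table: [u64; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
let indices = I32x4::from([7, 0, 3, 5]);
// SAFETY: every index is in bounds for `table`.
let gathered = unsafe { U64x4::gather(table.as_ptr(), indices) };
assert_eq!(gathered, U64x4::from_array([17, 10, 13, 15]));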
unsafe fn gather_masked(
    base: *const u64,
    indices: I32x4,
    mask: U64x4,
    src: U64x4,
) -> U64x4
§Scalar Equivalent:
U64x4::from([
    if (mask.as_array()[0] >> 63) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 63) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if (mask.as_array()[2] >> 63) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if (mask.as_array()[3] >> 63) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])
§Avx2
- VPGATHERDQ ymm, vm32x, ymm
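A sketch of the masked variant (same assumptions as above): a mask lane with its top bit set loads from memory, while a clear top bit keeps the corresponding src lane.
let table: [u64; 4] = [100, 200, 300, 400];
let indices = I32x4::from([0, 1, 2, 3]);
let mask = U64x4::from_array([u64::MAX, 0, u64::MAX, 0]);
let src = U64x4::from_array([1, 2, 3, 4]);
// SAFETY: every selected index is in bounds for `table`.
let out = unsafe { U64x4::gather_masked(table.as_ptr(), indices, mask, src) };
assert_eq!(out, U64x4::from_array([100, 2, 300, 4]));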
impl SimdBaseGatherable<I64x4> for U64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const u64,
    indices: I64x4,
    mask: U64x4,
    src: U64x4,
) -> U64x4
§Scalar Equivalent:
U64x4::from([
    if (mask.as_array()[0] >> 63) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 63) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if (mask.as_array()[2] >> 63) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if (mask.as_array()[3] >> 63) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl SimdBaseGatherable<U64x4> for I32x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: U64x4) -> I32x4
§Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const i32,
    indices: U64x4,
    mask: I32x4,
    src: I32x4,
) -> I32x4
§Scalar Equivalent:
I32x4::from([
    if ((mask.as_array()[0] as u32) >> 31) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if ((mask.as_array()[1] as u32) >> 31) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if ((mask.as_array()[2] as u32) >> 31) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if ((mask.as_array()[3] as u32) >> 31) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<U64x4> for I64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: U64x4) -> I64x4
§Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const i64,
    indices: U64x4,
    mask: I64x4,
    src: I64x4,
) -> I64x4
§Scalar Equivalent:
I64x4::from([
    if ((mask.as_array()[0] as u64) >> 63) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if ((mask.as_array()[1] as u64) >> 63) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if ((mask.as_array()[2] as u64) >> 63) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if ((mask.as_array()[3] as u64) >> 63) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl SimdBaseGatherable<U64x4> for U32x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u32, indices: U64x4) -> U32x4
§Scalar Equivalent:
U32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const u32,
    indices: U64x4,
    mask: U32x4,
    src: U32x4,
) -> U32x4
§Scalar Equivalent:
U32x4::from([
    if (mask.as_array()[0] >> 31) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 31) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if (mask.as_array()[2] >> 31) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if (mask.as_array()[3] >> 31) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])
§Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<U64x4> for U64x4
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet
the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: U64x4) -> U64x4
§Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const u64,
    indices: U64x4,
    mask: U64x4,
    src: U64x4,
) -> U64x4
§Scalar Equivalent:
U64x4::from([
    if (mask.as_array()[0] >> 63) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 63) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
    if (mask.as_array()[2] >> 63) == 1 {
        base.offset(indices.as_array()[2] as isize).read_unaligned()
    } else {
        src.as_array()[2]
    },
    if (mask.as_array()[3] >> 63) == 1 {
        base.offset(indices.as_array()[3] as isize).read_unaligned()
    } else {
        src.as_array()[3]
    },
])
§Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl Sub for U64x4
fn sub(self, rhs: U64x4) -> U64x4
Perform a pairwise wrapping_sub
§Scalar Equivalent
U64x4::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
])
§AVX2 Intrinsics Used
- _mm256_sub_epi64 (VPSUBQ ymm, ymm, ymm)
§Neon Intrinsics Used
- vsubq_u64
impl SubAssign for U64x4
fn sub_assign(&mut self, other: U64x4)
Performs the -= operation.
impl Copy for U64x4
impl Eq for U64x4
impl Pod for U64x4
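Because U64x4 is Pod, it can be viewed as raw bytes with bytemuck; a sketch assuming bytemuck is available as a direct dependency:
let v = U64x4::from_array([1, 2, 3, 4]);
// A 256-bit vector is exactly 32 bytes of plain old data.
let bytes: &[u8] = bytemuck::bytes_of(&v);
assert_eq!(bytes.len(), 32);
// Round-trip through bytes without any unsafe code.
let back: U64x4 = bytemuck::pod_read_unaligned(bytes);
assert_eq!(back, v);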
Auto Trait Implementations
impl Freeze for U64x4
impl RefUnwindSafe for U64x4
impl Send for U64x4
impl Sync for U64x4
impl Unpin for U64x4
impl UnwindSafe for U64x4
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits except for
the possible invalid bit patterns being checked during
is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits
as &Self.