Struct vectoreyes::U64x4
#[repr(transparent)]
pub struct U64x4(_);
[u64; 4] as a vector.
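Example (a minimal sketch; it relies on the From<[u64; 4]> and as_array APIs used throughout the scalar equivalents below):
use vectoreyes::U64x4;

// Build a vector from four u64 lanes and read them back.
let v = U64x4::from([1, 2, 3, 4]);
assert_eq!(v.as_array(), [1, 2, 3, 4]);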
Implementations
Trait Implementations
impl Add<U64x4> for U64x4
fn add(self, rhs: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
])
Avx2
- VPADDQ ymm, ymm, ymm
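For example (a sketch based on the scalar equivalent above; note that lane addition wraps on overflow):
use vectoreyes::U64x4;

let a = U64x4::from([u64::MAX, 1, 2, 3]);
let b = U64x4::from([1, 10, 20, 30]);
// Lane 0 wraps around to 0 instead of panicking.
assert_eq!((a + b).as_array(), [0, 11, 22, 33]);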
impl AddAssign<U64x4> for U64x4
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<U64x4> for U64x4
impl BitAndAssign<U64x4> for U64x4
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<U64x4> for U64x4
impl BitOrAssign<U64x4> for U64x4
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<U64x4> for U64x4
impl BitXorAssign<U64x4> for U64x4
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for U64x4
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
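For example (a sketch assuming the subtle crate's Choice and ConditionallySelectable traits are in scope):
use subtle::{Choice, ConditionallySelectable};
use vectoreyes::U64x4;

let a = U64x4::from([1, 2, 3, 4]);
let b = U64x4::from([5, 6, 7, 8]);
// Selects `a` when choice is 0 and `b` when choice is 1, without branching.
assert_eq!(U64x4::conditional_select(&a, &b, Choice::from(0)).as_array(), [1, 2, 3, 4]);
assert_eq!(U64x4::conditional_select(&a, &b, Choice::from(1)).as_array(), [5, 6, 7, 8]);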
impl ConstantTimeEq for U64x4
impl ExtendingCast<U16x8> for U64x4
fn extending_cast_from(vector: U16x8) -> U64x4
Scalar Equivalent:
U64x4::from([
u64::from(vector.as_array()[0]),
u64::from(vector.as_array()[1]),
u64::from(vector.as_array()[2]),
u64::from(vector.as_array()[3]),
])
Avx2
- VPMOVZXWQ ymm, xmm
impl ExtendingCast<U32x4> for U64x4
fn extending_cast_from(vector: U32x4) -> U64x4
Scalar Equivalent:
U64x4::from([
u64::from(vector.as_array()[0]),
u64::from(vector.as_array()[1]),
u64::from(vector.as_array()[2]),
u64::from(vector.as_array()[3]),
])
Avx2
- VPMOVZXDQ ymm, xmm
impl ExtendingCast<U8x16> for U64x4
fn extending_cast_from(vector: U8x16) -> U64x4
Scalar Equivalent:
U64x4::from([
u64::from(vector.as_array()[0]),
u64::from(vector.as_array()[1]),
u64::from(vector.as_array()[2]),
u64::from(vector.as_array()[3]),
])
Avx2
- VPMOVZXBQ ymm, xmm
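For example (a sketch; it assumes U8x16 can be built with From<[u8; 16]>, analogous to U64x4's array constructor):
use vectoreyes::{ExtendingCast, U64x4, U8x16};

// Zero-extend the four lowest u8 lanes into u64 lanes.
let bytes = U8x16::from([9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(U64x4::extending_cast_from(bytes).as_array(), [9, 8, 7, 6]);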
impl From<[U64x2; 2]> for U64x4
impl From<U32x4> for U64x4
impl From<U64x2> for U64x4
fn from(vector: U64x2) -> U64x4
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 4];
out[0..2].copy_from_slice(&vector.as_array());
U64x4::from(out)
Avx2
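For example (a sketch; it assumes U64x2 has a From<[u64; 2]> constructor like U64x4's):
use vectoreyes::{U64x2, U64x4};

let lo = U64x2::from([7, 9]);
// The two low lanes come from `lo`; the upper two lanes are zeroed.
assert_eq!(U64x4::from(lo).as_array(), [7, 9, 0, 0]);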
impl From<U64x4> for [U64x2; 2]
impl Shl<U64x4> for U64x4
impl Shl<u64> for U64x4
impl ShlAssign<U64x4> for U64x4
fn shl_assign(&mut self, amount: U64x4)
Performs the <<= operation.
impl ShlAssign<u64> for U64x4
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<U64x4> for U64x4
impl Shr<u64> for U64x4
impl ShrAssign<U64x4> for U64x4
fn shr_assign(&mut self, amount: U64x4)
Performs the >>= operation.
impl ShrAssign<u64> for U64x4
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for U64x4
fn set_lo(scalar: u64) -> U64x4
Scalar Equivalent:
let mut out = [0; 4];
out[0] = scalar;
U64x4::from(out)
Avx2
- Instruction sequence.
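For example (a sketch; the SimdBase trait is assumed to be importable from the crate root):
use vectoreyes::{SimdBase, U64x4};

// Only the lowest lane is set; the remaining lanes are zero.
assert_eq!(U64x4::set_lo(42).as_array(), [42, 0, 0, 0]);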
fn broadcast_lo(vector: U64x2) -> U64x4
fn cmp_eq(&self, other: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
if self.as_array()[0] == other.as_array()[0] { u64::MAX } else { 0 },
if self.as_array()[1] == other.as_array()[1] { u64::MAX } else { 0 },
if self.as_array()[2] == other.as_array()[2] { u64::MAX } else { 0 },
if self.as_array()[3] == other.as_array()[3] { u64::MAX } else { 0 },
])
Avx2
- VPCMPEQQ ymm, ymm, ymm
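For example (a sketch based on the scalar equivalent above):
use vectoreyes::{SimdBase, U64x4};

let a = U64x4::from([1, 2, 3, 4]);
let b = U64x4::from([1, 0, 3, 0]);
// Equal lanes become all-ones masks; unequal lanes become zero.
assert_eq!(a.cmp_eq(b).as_array(), [u64::MAX, 0, u64::MAX, 0]);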
fn and_not(&self, other: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
])
Avx2
- VPANDN ymm, ymm, ymm
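For example (a sketch; note the argument order: each lane computes self AND NOT other):
use vectoreyes::{SimdBase, U64x4};

let a = U64x4::from([0b1100, 1, u64::MAX, 0]);
let b = U64x4::from([0b1010, 1, 0, u64::MAX]);
assert_eq!(a.and_not(b).as_array(), [0b0100, 0, u64::MAX, 0]);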
fn cmp_gt(&self, other: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
if self.as_array()[0] > other.as_array()[0] { u64::MAX } else { 0 },
if self.as_array()[1] > other.as_array()[1] { u64::MAX } else { 0 },
if self.as_array()[2] > other.as_array()[2] { u64::MAX } else { 0 },
if self.as_array()[3] > other.as_array()[3] { u64::MAX } else { 0 },
])
Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 63);
Self::from(I64x4::from(*self ^ sign_bit).cmp_gt(
I64x4::from(other ^ sign_bit)
))
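For example (a sketch; the sign-bit flip in the polyfill above makes the comparison behave as unsigned across the full u64 range):
use vectoreyes::{SimdBase, U64x4};

let a = U64x4::from([u64::MAX, 1, 5, 0]);
let b = U64x4::from([0, 1, 9, u64::MAX]);
assert_eq!(a.cmp_gt(b).as_array(), [u64::MAX, 0, 0, 0]);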
fn shift_left<const BITS: usize>(&self) -> U64x4
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
U64x4::from(out)
Avx2
- VPSLLQ ymm, ymm, imm8
fn shift_right<const BITS: usize>(&self) -> U64x4
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
U64x4::from(out)
Avx2
- VPSRLQ ymm, ymm, imm8
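For example (a sketch; the shift amount is a const generic and applies to every lane):
use vectoreyes::{SimdBase, U64x4};

let v = U64x4::from([1, 2, 4, u64::MAX]);
// Logical shifts: bits shifted out are discarded, zeros are shifted in.
assert_eq!(v.shift_left::<1>().as_array(), [2, 4, 8, u64::MAX - 1]);
assert_eq!(v.shift_right::<1>().as_array(), [0, 1, 2, u64::MAX >> 1]);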
fn unpack_lo(&self, other: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
// Lane# 1
self.as_array()[2],
other.as_array()[2],
])
Avx2
- VPUNPCKLQDQ ymm, ymm, ymm
fn unpack_hi(&self, other: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
// Lane# 0
self.as_array()[1],
other.as_array()[1],
// Lane# 1
self.as_array()[3],
other.as_array()[3],
])
Avx2
- VPUNPCKHQDQ ymm, ymm, ymm
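For example (a sketch based on the scalar equivalents above; interleaving happens within each 128-bit lane, not across the whole vector):
use vectoreyes::{SimdBase, U64x4};

let a = U64x4::from([0, 1, 2, 3]);
let b = U64x4::from([10, 11, 12, 13]);
assert_eq!(a.unpack_lo(b).as_array(), [0, 10, 2, 12]);
assert_eq!(a.unpack_hi(b).as_array(), [1, 11, 3, 13]);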
fn max(&self, other: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
])
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn min(&self, other: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
])
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
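For example (a sketch; the comparison is unsigned even though the AVX2 path falls back to the scalar code above):
use vectoreyes::{SimdBase, U64x4};

let a = U64x4::from([1, 200, 3, u64::MAX]);
let b = U64x4::from([100, 2, 300, 0]);
assert_eq!(a.max(b).as_array(), [100, 200, 300, u64::MAX]);
assert_eq!(a.min(b).as_array(), [1, 2, 3, 0]);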
const ZERO: Self = _
type BroadcastLoInput = U64x2
impl SimdBase4x for U64x4
fn blend<const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: U64x4
) -> U64x4
Scalar Equivalent:
U64x4::from([
(if B0 { if_true } else { *self }).as_array()[0],
(if B1 { if_true } else { *self }).as_array()[1],
(if B2 { if_true } else { *self }).as_array()[2],
(if B3 { if_true } else { *self }).as_array()[3],
])
Avx2
- VPBLENDD ymm, ymm, ymm, imm8
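For example (a sketch; the const parameters are ordered B3..B0, so B0 controls lane 0, as the scalar equivalent shows):
use vectoreyes::{SimdBase4x, U64x4};

let base = U64x4::from([0, 0, 0, 0]);
let other = U64x4::from([1, 2, 3, 4]);
assert_eq!(base.blend::<false, false, false, true>(other).as_array(), [1, 0, 0, 0]);
assert_eq!(base.blend::<true, false, true, false>(other).as_array(), [0, 2, 0, 4]);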
impl SimdBase4x64 for U64x4
impl SimdBase64 for U64x4
fn mul_lo(&self, other: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
((self.as_array()[0] as u32) as u64) * ((other.as_array()[0] as u32) as u64),
((self.as_array()[1] as u32) as u64) * ((other.as_array()[1] as u32) as u64),
((self.as_array()[2] as u32) as u64) * ((other.as_array()[2] as u32) as u64),
((self.as_array()[3] as u32) as u64) * ((other.as_array()[3] as u32) as u64),
])
Avx2
- VPMULUDQ ymm, ymm, ymm
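For example (a sketch; only the low 32 bits of each lane participate in the multiply, matching VPMULUDQ):
use vectoreyes::{SimdBase64, U64x4};

let a = U64x4::from([3, u64::MAX, 1 << 32, 7]);
let b = U64x4::from([5, 2, 9, 0]);
// Lane 1 keeps only the low 32 bits of u64::MAX; lane 2's low 32 bits are zero.
assert_eq!(a.mul_lo(b).as_array(), [15, (u32::MAX as u64) * 2, 0, 0]);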
impl SimdBaseGatherable<I32x4> for U64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I32x4) -> U64x4
Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERDQ ymm, vm32x, ymm
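For example (a sketch; it assumes I32x4 has a From<[i32; 4]> constructor, and every index stays in bounds of the array so the safety contract above is met):
use vectoreyes::{I32x4, SimdBaseGatherable, U64x4};

let table: [u64; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
let indices = I32x4::from([3, 0, 7, 1]);
// SAFETY: every index is in bounds for `table`, so each unaligned read is valid.
let gathered = unsafe { U64x4::gather(table.as_ptr(), indices) };
assert_eq!(gathered.as_array(), [13, 10, 17, 11]);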
unsafe fn gather_masked(
    base: *const u64,
    indices: I32x4,
    mask: U64x4,
    src: U64x4
) -> U64x4
Scalar Equivalent:
U64x4::from([
if (mask.as_array()[0] >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERDQ ymm, vm32x, ymm
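For example (a sketch under the same assumptions as the gather example above; a lane is loaded only when the corresponding mask lane has its top bit set, otherwise the lane from src is kept):
use vectoreyes::{I32x4, SimdBaseGatherable, U64x4};

let table: [u64; 4] = [100, 200, 300, 400];
let indices = I32x4::from([0, 1, 2, 3]);
let mask = U64x4::from([u64::MAX, 0, u64::MAX, 0]);
let src = U64x4::from([7, 8, 9, 10]);
// SAFETY: all indices are in bounds, so even unmasked lanes would be valid to read.
let out = unsafe { U64x4::gather_masked(table.as_ptr(), indices, mask, src) };
assert_eq!(out.as_array(), [100, 8, 300, 10]);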
impl SimdBaseGatherable<I64x4> for U64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const u64,
    indices: I64x4,
    mask: U64x4,
    src: U64x4
) -> U64x4
Scalar Equivalent:
U64x4::from([
if (mask.as_array()[0] >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl SimdBaseGatherable<U64x4> for I32x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: U64x4) -> I32x4
Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const i32,
    indices: U64x4,
    mask: I32x4,
    src: I32x4
) -> I32x4
Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<U64x4> for I64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: U64x4) -> I64x4
Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const i64,
    indices: U64x4,
    mask: I64x4,
    src: I64x4
) -> I64x4
Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl SimdBaseGatherable<U64x4> for U32x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u32, indices: U64x4) -> U32x4
Scalar Equivalent:
U32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const u32,
    indices: U64x4,
    mask: U32x4,
    src: U32x4
) -> U32x4
Scalar Equivalent:
U32x4::from([
if (mask.as_array()[0] >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<U64x4> for U64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
unsafe fn gather_masked(
    base: *const u64,
    indices: U64x4,
    mask: U64x4,
    src: U64x4
) -> U64x4
Scalar Equivalent:
U64x4::from([
if (mask.as_array()[0] >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQQ ymm, vm64x, ymm
impl Sub<U64x4> for U64x4
fn sub(self, rhs: U64x4) -> U64x4
Scalar Equivalent:
U64x4::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
])
Avx2
- VPSUBQ ymm, ymm, ymm
impl SubAssign<U64x4> for U64x4
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for U64x4
impl Eq for U64x4
impl Pod for U64x4
Auto Trait Implementations
impl RefUnwindSafe for U64x4
impl Send for U64x4
impl Sync for U64x4
impl Unpin for U64x4
impl UnwindSafe for U64x4
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits, except for the possible invalid bit patterns being checked during is_valid_bit_pattern.