pub struct U64x2(/* private fields */);
[u64; 2] as a vector.
Implementations§
impl U64x2
pub const fn from_array(arr: [u64; 2]) -> Self
Create a vector from an array.
Unlike the From trait function, the from_array function is const.
§Example
const MY_EXTREMELY_FUN_VALUE: U64x2 = U64x2::from_array([0, 1]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
    assert_eq!(i as u64, value);
}
impl U64x2
pub fn carryless_mul<const HI_OTHER: bool, const HI_SELF: bool>(
    &self,
    other: U64x2,
) -> U64x2
§Scalar Equivalent:
let x = if HI_SELF { self.as_array()[1] } else { self.as_array()[0] };
let y = if HI_OTHER { other.as_array()[1] } else { other.as_array()[0] };
// This software carryless-multiplication implementation is from
// https://github.com/RustCrypto/universal-hashes/blob/2e8a948dbb25bc2ac6c712b4bdc21b158527ca70/polyval/src/backend/soft64.rs
// That code is MIT/Apache dual-licensed.
#[inline(always)]
fn bmul64(x: u64, y: u64) -> u64 {
    use std::num::Wrapping;
    let x0 = Wrapping(x & 0x1111_1111_1111_1111);
    let x1 = Wrapping(x & 0x2222_2222_2222_2222);
    let x2 = Wrapping(x & 0x4444_4444_4444_4444);
    let x3 = Wrapping(x & 0x8888_8888_8888_8888);
    let y0 = Wrapping(y & 0x1111_1111_1111_1111);
    let y1 = Wrapping(y & 0x2222_2222_2222_2222);
    let y2 = Wrapping(y & 0x4444_4444_4444_4444);
    let y3 = Wrapping(y & 0x8888_8888_8888_8888);
    let mut z0 = ((x0 * y0) ^ (x1 * y3) ^ (x2 * y2) ^ (x3 * y1)).0;
    let mut z1 = ((x0 * y1) ^ (x1 * y0) ^ (x2 * y3) ^ (x3 * y2)).0;
    let mut z2 = ((x0 * y2) ^ (x1 * y1) ^ (x2 * y0) ^ (x3 * y3)).0;
    let mut z3 = ((x0 * y3) ^ (x1 * y2) ^ (x2 * y1) ^ (x3 * y0)).0;
    z0 &= 0x1111_1111_1111_1111;
    z1 &= 0x2222_2222_2222_2222;
    z2 &= 0x4444_4444_4444_4444;
    z3 &= 0x8888_8888_8888_8888;
    z0 | z1 | z2 | z3
}
#[inline(always)]
fn rev64(mut x: u64) -> u64 {
    x = ((x & 0x5555_5555_5555_5555) << 1) | ((x >> 1) & 0x5555_5555_5555_5555);
    x = ((x & 0x3333_3333_3333_3333) << 2) | ((x >> 2) & 0x3333_3333_3333_3333);
    x = ((x & 0x0f0f_0f0f_0f0f_0f0f) << 4) | ((x >> 4) & 0x0f0f_0f0f_0f0f_0f0f);
    x = ((x & 0x00ff_00ff_00ff_00ff) << 8) | ((x >> 8) & 0x00ff_00ff_00ff_00ff);
    x = ((x & 0x0000_ffff_0000_ffff) << 16) | ((x >> 16) & 0x0000_ffff_0000_ffff);
    x.rotate_right(32)
}
U64x2::from([
    bmul64(x, y),
    rev64(bmul64(rev64(x), rev64(y))) >> 1,
])
§Avx2
- PCLMULQDQ xmm, xmm, imm8
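As a quick illustration (a sketch, not from the crate's documentation): carryless multiplication is polynomial multiplication over GF(2), so products never carry between bit positions. The 128-bit product comes back with its low 64 bits in lane 0 and its high 64 bits in lane 1.

let a = U64x2::from([0b11, 0]);  // the polynomial x + 1
let b = U64x2::from([0b101, 0]); // the polynomial x^2 + 1
// Multiply the low lanes of both operands (HI_OTHER = false, HI_SELF = false).
let product = a.carryless_mul::<false, false>(b);
// (x + 1)(x^2 + 1) = x^3 + x^2 + x + 1 = 0b1111, which fits in the low lane.
assert_eq!(product.as_array(), [0b1111, 0]);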
Trait Implementations§
impl Add for U64x2
fn add(self, rhs: U64x2) -> U64x2
Perform a pairwise wrapping_add.
§Scalar Equivalent:
U64x2::from([
    self.as_array()[0].wrapping_add(rhs.as_array()[0]),
    self.as_array()[1].wrapping_add(rhs.as_array()[1]),
])
§AVX2 Intrinsics Used
- _mm_add_epi64 (PADDQ xmm, xmm)
§Neon Intrinsics Used
- vaddq_u64: this intrinsic compiles to ADD Vd.2D, Vn.2D, Vm.2D.
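A brief usage sketch (not from the crate's docs) showing that the addition wraps on overflow instead of panicking:

let a = U64x2::from([u64::MAX, 1]);
let b = U64x2::from([1, 2]);
// Lane 0 wraps around to 0; lane 1 adds normally.
assert_eq!((a + b).as_array(), [0, 3]);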
impl AddAssign for U64x2
fn add_assign(&mut self, other: U64x2)
Performs the += operation.
impl BitAnd for U64x2
fn bitand(self, rhs: U64x2) -> U64x2
impl BitAndAssign for U64x2
fn bitand_assign(&mut self, other: U64x2)
Performs the &= operation.
impl BitOr for U64x2
fn bitor(self, rhs: U64x2) -> U64x2
impl BitOrAssign for U64x2
fn bitor_assign(&mut self, other: U64x2)
Performs the |= operation.
impl BitXor for U64x2
fn bitxor(self, rhs: U64x2) -> U64x2
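A small sketch (not from the crate's docs): the bitwise operators apply lane-wise, exactly like the corresponding u64 operators:

let a = U64x2::from([0b1100, u64::MAX]);
let b = U64x2::from([0b1010, 0]);
assert_eq!((a & b).as_array(), [0b1000, 0]);
assert_eq!((a | b).as_array(), [0b1110, u64::MAX]);
assert_eq!((a ^ b).as_array(), [0b0110, u64::MAX]);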
impl BitXorAssign for U64x2
fn bitxor_assign(&mut self, other: U64x2)
Performs the ^= operation.
impl ConditionallySelectable for U64x2
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice)
Conditionally swap self and other if choice == 1; otherwise, reassign both unto themselves.
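A usage sketch, assuming the subtle crate (where Choice and this trait are defined): conditional_select returns a when choice is 0 and b when it is 1, without branching on the secret choice:

use subtle::{Choice, ConditionallySelectable};
let a = U64x2::from([1, 2]);
let b = U64x2::from([3, 4]);
// Choice::from(0) picks `a`; Choice::from(1) picks `b`.
assert_eq!(U64x2::conditional_select(&a, &b, Choice::from(0)).as_array(), [1, 2]);
assert_eq!(U64x2::conditional_select(&a, &b, Choice::from(1)).as_array(), [3, 4]);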
impl ConstantTimeEq for U64x2
impl<'de> Deserialize<'de> for U64x2
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
    D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer.
impl Distribution<U64x2> for Standard
impl ExtendingCast<U16x8> for U64x2
impl ExtendingCast<U32x4> for U64x2
impl ExtendingCast<U8x16> for U64x2
impl From<U64x2> for U64x4
fn from(vector: U64x2) -> U64x4
impl Shl<u64> for U64x2
impl Shl for U64x2
impl ShlAssign<u64> for U64x2
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl ShlAssign for U64x2
fn shl_assign(&mut self, amount: U64x2)
Performs the <<= operation.
impl Shr<u64> for U64x2
impl Shr for U64x2
impl ShrAssign<u64> for U64x2
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl ShrAssign for U64x2
fn shr_assign(&mut self, amount: U64x2)
Performs the >>= operation.
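A sketch (not from the crate's docs): shifting by a u64 shifts every lane by the same amount, while shifting by a U64x2 shifts each lane by the corresponding lane of the right-hand side:

let v = U64x2::from([1, 4]);
// Scalar shift: both lanes shift left by 3.
assert_eq!((v << 3).as_array(), [8, 32]);
// Vector shift: lane 0 shifts by 1, lane 1 shifts by 2.
assert_eq!((v << U64x2::from([1, 2])).as_array(), [2, 16]);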
impl SimdBase for U64x2
fn set_lo(scalar: u64) -> U64x2
fn broadcast_lo(vector: U64x2) -> U64x2
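A sketch of the two constructors above (hedged; the lane semantics are assumed from the method names, not confirmed here): set_lo fills lane 0 and zeroes the rest, and broadcast_lo copies lane 0 of its input to every lane:

let v = U64x2::set_lo(7);
assert_eq!(v.as_array(), [7, 0]); // assumed: remaining lanes are zeroed
assert_eq!(U64x2::broadcast_lo(v).as_array(), [7, 7]);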
fn cmp_eq(&self, other: U64x2) -> U64x2
fn and_not(&self, other: U64x2) -> U64x2
fn cmp_gt(&self, other: U64x2) -> U64x2
§Scalar Equivalent:
U64x2::from([
    if self.as_array()[0] > other.as_array()[0] { u64::MAX } else { 0 },
    if self.as_array()[1] > other.as_array()[1] { u64::MAX } else { 0 },
])
§Avx2
NOTE: this implementation uses an efficient vector polyfill, though this operation is not natively supported.
// Based on https://stackoverflow.com/a/33173643 and https://git.io/JmghK
let sign_bit = Self::broadcast(1 << 63);
Self::from(I64x2::from(*self ^ sign_bit).cmp_gt(
    I64x2::from(other ^ sign_bit)
))
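A scalar sketch (not part of the crate) of why the sign-bit flip in the polyfill above works: XORing 1 << 63 into both operands maps unsigned order onto signed order, so a signed comparison yields the unsigned answer:

fn unsigned_gt_via_signed(a: u64, b: u64) -> bool {
    let flip = 1u64 << 63; // flip the sign bit of each operand
    ((a ^ flip) as i64) > ((b ^ flip) as i64)
}
assert!(unsigned_gt_via_signed(u64::MAX, 0)); // u64::MAX maps to i64::MAX
assert!(!unsigned_gt_via_signed(0, 1));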
fn shift_left<const BITS: usize>(&self) -> U64x2
fn shift_right<const BITS: usize>(&self) -> U64x2
fn unpack_lo(&self, other: U64x2) -> U64x2
fn unpack_hi(&self, other: U64x2) -> U64x2
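A sketch assuming the usual x86 unpack semantics (PUNPCKLQDQ/PUNPCKHQDQ), which is an assumption rather than something stated on this page: unpack_lo interleaves the low lanes of the two vectors and unpack_hi the high lanes:

let a = U64x2::from([1, 2]);
let b = U64x2::from([3, 4]);
assert_eq!(a.unpack_lo(b).as_array(), [1, 3]); // assumed semantics
assert_eq!(a.unpack_hi(b).as_array(), [2, 4]);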
type BroadcastLoInput = U64x2
A vector of [Self::Scalar; 128 / (8 * std::mem::size_of::<Self::Scalar>())]
impl SimdBase64 for U64x2
impl SimdBaseGatherable<I64x2> for U64x2
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I64x2) -> U64x2
unsafe fn gather_masked(
    base: *const u64,
    indices: I64x2,
    mask: U64x2,
    src: U64x2,
) -> U64x2
§Scalar Equivalent:
U64x2::from([
    if (mask.as_array()[0] >> 63) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 63) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
])
§Avx2
- VPGATHERQQ xmm, vm64x, xmm
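A usage sketch for the unmasked gather above (not from the crate's docs, and assuming the SimdBaseGatherable trait is in scope): each lane i loads base[indices[i]]:

let data: [u64; 4] = [10, 20, 30, 40];
let indices = I64x2::from([3, 0]);
// Safety: both indices are in bounds for `data`.
let gathered = unsafe { U64x2::gather(data.as_ptr(), indices) };
assert_eq!(gathered.as_array(), [40, 10]);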
impl SimdBaseGatherable<U64x2> for I64x2
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: U64x2) -> I64x2
unsafe fn gather_masked(
    base: *const i64,
    indices: U64x2,
    mask: I64x2,
    src: I64x2,
) -> I64x2
§Scalar Equivalent:
I64x2::from([
    if ((mask.as_array()[0] as u64) >> 63) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if ((mask.as_array()[1] as u64) >> 63) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
])
§Avx2
- VPGATHERQQ xmm, vm64x, xmm
impl SimdBaseGatherable<U64x2> for U64x2
§Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: U64x2) -> U64x2
unsafe fn gather_masked(
    base: *const u64,
    indices: U64x2,
    mask: U64x2,
    src: U64x2,
) -> U64x2
§Scalar Equivalent:
U64x2::from([
    if (mask.as_array()[0] >> 63) == 1 {
        base.offset(indices.as_array()[0] as isize).read_unaligned()
    } else {
        src.as_array()[0]
    },
    if (mask.as_array()[1] >> 63) == 1 {
        base.offset(indices.as_array()[1] as isize).read_unaligned()
    } else {
        src.as_array()[1]
    },
])
§Avx2
- VPGATHERQQ xmm, vm64x, xmm
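A sketch of masked gathering (not from the crate's docs): per the scalar equivalent above, lanes whose mask has the top bit set load from memory, and the remaining lanes keep the corresponding lane of src:

let data: [u64; 4] = [10, 20, 30, 40];
let indices = U64x2::from([1, 2]);
let mask = U64x2::from([u64::MAX, 0]); // gather lane 0; keep src in lane 1
let src = U64x2::from([777, 888]);
// Safety: every index selected by the mask is in bounds for `data`.
let out = unsafe { U64x2::gather_masked(data.as_ptr(), indices, mask, src) };
assert_eq!(out.as_array(), [20, 888]);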
impl Sub for U64x2
fn sub(self, rhs: U64x2) -> U64x2
Perform a pairwise wrapping_sub.
§Scalar Equivalent:
U64x2::from([
    self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
    self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
])
§AVX2 Intrinsics Used
- _mm_sub_epi64 (PSUBQ xmm, xmm)
§Neon Intrinsics Used
- vsubq_u64: this intrinsic compiles to SUB Vd.2D, Vn.2D, Vm.2D.
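A brief sketch (not from the crate's docs) showing that the subtraction wraps on underflow:

let a = U64x2::from([0, 5]);
let b = U64x2::from([1, 2]);
// Lane 0 wraps around to u64::MAX; lane 1 subtracts normally.
assert_eq!((a - b).as_array(), [u64::MAX, 3]);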
impl SubAssign for U64x2
fn sub_assign(&mut self, other: U64x2)
Performs the -= operation.
impl Copy for U64x2
impl Eq for U64x2
impl Pod for U64x2
Auto Trait Implementations§
impl Freeze for U64x2
impl RefUnwindSafe for U64x2
impl Send for U64x2
impl Sync for U64x2
impl Unpin for U64x2
impl UnwindSafe for U64x2
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.
fn is_valid_bit_pattern(_bits: &T) -> bool
If this function returns true, then it must be valid to reinterpret bits as &Self.