Struct vectoreyes::I64x2
#[repr(transparent)]
pub struct I64x2(_);
[i64; 2] as a vector.
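A minimal usage sketch, assuming the From<[i64; 2]>, as_array, and Add APIs documented below:

use vectoreyes::I64x2;

fn main() {
    // Build a vector from a two-element array and read it back.
    let v = I64x2::from([1_i64, -2]);
    let w = I64x2::from([10, 20]);
    // Lane-wise addition via the Add impl below.
    assert_eq!((v + w).as_array(), [11, 18]);
}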
Implementations
Trait Implementations
impl Add<I64x2> for I64x2
impl AddAssign<I64x2> for I64x2
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<I64x2> for I64x2
impl BitAndAssign<I64x2> for I64x2
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<I64x2> for I64x2
impl BitOrAssign<I64x2> for I64x2
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<I64x2> for I64x2
impl BitXorAssign<I64x2> for I64x2
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for I64x2
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
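A hedged sketch of constant-time lane selection; it assumes Choice here is the subtle crate's Choice type, as the signatures suggest:

use subtle::{Choice, ConditionallySelectable};
use vectoreyes::I64x2;

fn main() {
    let a = I64x2::from([1, 2]);
    let b = I64x2::from([3, 4]);
    // Choice::from(1) selects b; Choice::from(0) would select a.
    let picked = I64x2::conditional_select(&a, &b, Choice::from(1));
    assert_eq!(picked.as_array(), [3, 4]);
}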
impl ConstantTimeEq for I64x2
impl ExtendingCast<I16x8> for I64x2
fn extending_cast_from(vector: I16x8) -> I64x2
Scalar Equivalent:
I64x2::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
])
Avx2
- PMOVSXWQ xmm, xmm
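A short sketch of this sign-extending cast, assuming I16x8 is likewise constructible with From<[i16; 8]>:

use vectoreyes::{ExtendingCast, I16x8, I64x2};

fn main() {
    let narrow = I16x8::from([-5, 7, 0, 0, 0, 0, 0, 0]);
    // Only the two low i16 lanes are sign-extended into the output.
    let wide = I64x2::extending_cast_from(narrow);
    assert_eq!(wide.as_array(), [-5_i64, 7]);
}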
impl ExtendingCast<I32x4> for I64x2
fn extending_cast_from(vector: I32x4) -> I64x2
Scalar Equivalent:
I64x2::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
])
Avx2
- PMOVSXDQ xmm, xmm
impl ExtendingCast<I8x16> for I64x2
fn extending_cast_from(vector: I8x16) -> I64x2
Scalar Equivalent:
I64x2::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
])
Avx2
- PMOVSXBQ xmm, xmm
impl From<I64x2> for I64x4
fn from(vector: I64x2) -> I64x4
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 4];
out[0..2].copy_from_slice(&vector.as_array());
I64x4::from(out)
Avx2
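A sketch of widening to I64x4, assuming I64x4 also exposes as_array:

use vectoreyes::{I64x2, I64x4};

fn main() {
    let narrow = I64x2::from([1, 2]);
    // The low two lanes come from narrow; the upper two lanes are zeroed.
    let wide = I64x4::from(narrow);
    assert_eq!(wide.as_array(), [1, 2, 0, 0]);
}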
impl Shl<I64x2> for I64x2
fn shl(self, amount: I64x2) -> I64x2
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 64 || amm < 0 {
0
} else {
*x << amm
};
}
I64x2::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
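A sketch of the per-lane shift, matching the scalar equivalent above (a shift amount of 64 or more yields zero):

use vectoreyes::I64x2;

fn main() {
    let v = I64x2::from([1, 3]);
    let amounts = I64x2::from([4, 70]);
    // Lane 0: 1 << 4 = 16. Lane 1: amount >= 64, so the result is 0.
    assert_eq!((v << amounts).as_array(), [16, 0]);
}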
impl Shl<u64> for I64x2
impl ShlAssign<I64x2> for I64x2
fn shl_assign(&mut self, amount: I64x2)
Performs the <<= operation.
impl ShlAssign<u64> for I64x2
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<I64x2> for I64x2
fn shr(self, amount: I64x2) -> I64x2
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 64 || amm < 0 {
if *x < 0 { -1 } else { 0 }
} else {
*x >> amm
};
}
I64x2::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
impl Shr<u64> for I64x2
fn shr(self, amount: u64) -> I64x2
Scalar Equivalent:
if amount >= 64 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I64x2::from(out)
} else {
I64x2::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
])
}
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
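A sketch of the arithmetic right shift by a scalar amount (the sign bit is preserved for negative lanes):

use vectoreyes::I64x2;

fn main() {
    let v = I64x2::from([-8, 8]);
    // -8 >> 2 = -2 and 8 >> 2 = 2 under arithmetic shifting.
    assert_eq!((v >> 2_u64).as_array(), [-2, 2]);
}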
impl ShrAssign<I64x2> for I64x2
fn shr_assign(&mut self, amount: I64x2)
Performs the >>= operation.
impl ShrAssign<u64> for I64x2
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for I64x2
fn set_lo(scalar: i64) -> I64x2
Scalar Equivalent:
let mut out = [0; 2];
out[0] = scalar;
I64x2::from(out)
Avx2
- Instruction sequence.
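A sketch of set_lo and the ZERO constant (SimdBase must be in scope to call them):

use vectoreyes::{I64x2, SimdBase};

fn main() {
    // Lane 0 receives the scalar; every other lane is zero.
    let v = I64x2::set_lo(42);
    assert_eq!(v.as_array(), [42, 0]);
    assert_eq!(I64x2::ZERO.as_array(), [0, 0]);
}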
fn broadcast_lo(vector: I64x2) -> I64x2
fn cmp_eq(&self, other: I64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
])
Avx2
- PCMPEQQ xmm, xmm
fn and_not(&self, other: I64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
])
Avx2
- PANDN xmm, xmm
fn cmp_gt(&self, other: I64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
])
Avx2
- PCMPGTQ xmm, xmm
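A sketch combining the comparisons and and_not above; lanes where a comparison holds become -1 (all ones) and otherwise 0:

use vectoreyes::{I64x2, SimdBase};

fn main() {
    let a = I64x2::from([5, 1]);
    let b = I64x2::from([3, 1]);
    let gt = a.cmp_gt(b);
    assert_eq!(gt.as_array(), [-1, 0]);
    // and_not computes a & !gt, clearing the lanes selected by the mask.
    assert_eq!(a.and_not(gt).as_array(), [0, 1]);
    assert_eq!(a.cmp_eq(b).as_array(), [0, -1]);
}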
fn shift_left<const BITS: usize>(&self) -> I64x2
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
I64x2::from(out)
Avx2
- PSLLQ xmm, imm8
fn shift_right<const BITS: usize>(&self) -> I64x2
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
I64x2::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn unpack_lo(&self, other: I64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
])
Avx2
- PUNPCKLQDQ xmm, xmm
fn unpack_hi(&self, other: I64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
// Lane# 0
self.as_array()[1],
other.as_array()[1],
])
Avx2
- PUNPCKHQDQ xmm, xmm
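A sketch of interleaving the low and high lanes of two vectors:

use vectoreyes::{I64x2, SimdBase};

fn main() {
    let a = I64x2::from([1, 2]);
    let b = I64x2::from([10, 20]);
    // unpack_lo pairs the lane-0 values; unpack_hi pairs the lane-1 values.
    assert_eq!(a.unpack_lo(b).as_array(), [1, 10]);
    assert_eq!(a.unpack_hi(b).as_array(), [2, 20]);
}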
fn max(&self, other: I64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
])
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
fn min(&self, other: I64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
])
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
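A sketch of the lane-wise max and min (both are scalar polyfills on Avx2, per the warnings above):

use vectoreyes::{I64x2, SimdBase};

fn main() {
    let a = I64x2::from([1, 20]);
    let b = I64x2::from([10, 2]);
    assert_eq!(a.max(b).as_array(), [10, 20]);
    assert_eq!(a.min(b).as_array(), [1, 2]);
}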
const ZERO: Self = _
type BroadcastLoInput = I64x2
impl SimdBase64 for I64x2
impl SimdBaseGatherable<I64x2> for I64x2
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: I64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ xmm, vm64x, xmm
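A sketch of an in-bounds gather; fully qualified syntax is used here because several SimdBaseGatherable impls exist for I64x2:

use vectoreyes::{I64x2, SimdBaseGatherable};

fn main() {
    let table: [i64; 4] = [100, 200, 300, 400];
    let indices = I64x2::from([3, 1]);
    // SAFETY: every index is in bounds for table, so base + indices[i]
    // satisfies the requirements of std::ptr::read_unaligned.
    let gathered = unsafe {
        <I64x2 as SimdBaseGatherable<I64x2>>::gather(table.as_ptr(), indices)
    };
    assert_eq!(gathered.as_array(), [400, 200]);
}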
unsafe fn gather_masked(
    base: *const i64,
    indices: I64x2,
    mask: I64x2,
    src: I64x2
) -> I64x2
Scalar Equivalent:
I64x2::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
])
Avx2
- VPGATHERQQ xmm, vm64x, xmm
impl SimdBaseGatherable<I64x2> for U64x2
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I64x2) -> U64x2
Scalar Equivalent:
U64x2::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ xmm, vm64x, xmm
unsafe fn gather_masked(
    base: *const u64,
    indices: I64x2,
    mask: U64x2,
    src: U64x2
) -> U64x2
Scalar Equivalent:
U64x2::from([
if (mask.as_array()[0] >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
])
Avx2
- VPGATHERQQ xmm, vm64x, xmm
impl SimdBaseGatherable<U64x2> for I64x2
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: U64x2) -> I64x2
Scalar Equivalent:
I64x2::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
])
Avx2
- VPGATHERQQ xmm, vm64x, xmm
unsafe fn gather_masked(
    base: *const i64,
    indices: U64x2,
    mask: I64x2,
    src: I64x2
) -> I64x2
Scalar Equivalent:
I64x2::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
])
Avx2
- VPGATHERQQ xmm, vm64x, xmm
impl Sub<I64x2> for I64x2
impl SubAssign<I64x2> for I64x2
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for I64x2
impl Eq for I64x2
impl Pod for I64x2
Auto Trait Implementations
impl RefUnwindSafe for I64x2
impl Send for I64x2
impl Sync for I64x2
impl Unpin for I64x2
impl UnwindSafe for I64x2
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits, except for the possible invalid bit patterns being checked during is_valid_bit_pattern.