Struct vectoreyes::I16x16
#[repr(transparent)]
pub struct I16x16(_);
[i16; 16] as a vector.
Implementations
impl I16x16
pub const fn from_array(array: [i16; 16]) -> I16x16
Create a vector from an array. Unlike the From trait function, from_array is const.
Example
const MY_EXTREMELY_FUN_VALUE: I16x16 =
I16x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
for (i, value) in MY_EXTREMELY_FUN_VALUE.as_array().iter().copied().enumerate() {
assert_eq!(i as i16, value);
}
Avx2
Trait Implementations
impl Add<I16x16> for I16x16
fn add(self, rhs: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
self.as_array()[4].wrapping_add(rhs.as_array()[4]),
self.as_array()[5].wrapping_add(rhs.as_array()[5]),
self.as_array()[6].wrapping_add(rhs.as_array()[6]),
self.as_array()[7].wrapping_add(rhs.as_array()[7]),
self.as_array()[8].wrapping_add(rhs.as_array()[8]),
self.as_array()[9].wrapping_add(rhs.as_array()[9]),
self.as_array()[10].wrapping_add(rhs.as_array()[10]),
self.as_array()[11].wrapping_add(rhs.as_array()[11]),
self.as_array()[12].wrapping_add(rhs.as_array()[12]),
self.as_array()[13].wrapping_add(rhs.as_array()[13]),
self.as_array()[14].wrapping_add(rhs.as_array()[14]),
self.as_array()[15].wrapping_add(rhs.as_array()[15]),
])
Avx2
- VPADDW ymm, ymm, ymm
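Example
A minimal sketch of the wrapping behavior (assuming vectoreyes is in scope):
let a = I16x16::from_array([i16::MAX; 16]);
let b = I16x16::from_array([1; 16]);
// Lane-wise addition wraps on overflow instead of panicking.
let sum = a + b;
assert_eq!(sum.as_array(), [i16::MIN; 16]);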
impl AddAssign<I16x16> for I16x16
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<I16x16> for I16x16
fn bitand(self, rhs: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
self.as_array()[0] & rhs.as_array()[0],
self.as_array()[1] & rhs.as_array()[1],
self.as_array()[2] & rhs.as_array()[2],
self.as_array()[3] & rhs.as_array()[3],
self.as_array()[4] & rhs.as_array()[4],
self.as_array()[5] & rhs.as_array()[5],
self.as_array()[6] & rhs.as_array()[6],
self.as_array()[7] & rhs.as_array()[7],
self.as_array()[8] & rhs.as_array()[8],
self.as_array()[9] & rhs.as_array()[9],
self.as_array()[10] & rhs.as_array()[10],
self.as_array()[11] & rhs.as_array()[11],
self.as_array()[12] & rhs.as_array()[12],
self.as_array()[13] & rhs.as_array()[13],
self.as_array()[14] & rhs.as_array()[14],
self.as_array()[15] & rhs.as_array()[15],
])
Avx2
- VPAND ymm, ymm, ymm
impl BitAndAssign<I16x16> for I16x16
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<I16x16> for I16x16
fn bitor(self, rhs: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
self.as_array()[0] | rhs.as_array()[0],
self.as_array()[1] | rhs.as_array()[1],
self.as_array()[2] | rhs.as_array()[2],
self.as_array()[3] | rhs.as_array()[3],
self.as_array()[4] | rhs.as_array()[4],
self.as_array()[5] | rhs.as_array()[5],
self.as_array()[6] | rhs.as_array()[6],
self.as_array()[7] | rhs.as_array()[7],
self.as_array()[8] | rhs.as_array()[8],
self.as_array()[9] | rhs.as_array()[9],
self.as_array()[10] | rhs.as_array()[10],
self.as_array()[11] | rhs.as_array()[11],
self.as_array()[12] | rhs.as_array()[12],
self.as_array()[13] | rhs.as_array()[13],
self.as_array()[14] | rhs.as_array()[14],
self.as_array()[15] | rhs.as_array()[15],
])
Avx2
- VPOR ymm, ymm, ymm
impl BitOrAssign<I16x16> for I16x16
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<I16x16> for I16x16
fn bitxor(self, rhs: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
self.as_array()[0] ^ rhs.as_array()[0],
self.as_array()[1] ^ rhs.as_array()[1],
self.as_array()[2] ^ rhs.as_array()[2],
self.as_array()[3] ^ rhs.as_array()[3],
self.as_array()[4] ^ rhs.as_array()[4],
self.as_array()[5] ^ rhs.as_array()[5],
self.as_array()[6] ^ rhs.as_array()[6],
self.as_array()[7] ^ rhs.as_array()[7],
self.as_array()[8] ^ rhs.as_array()[8],
self.as_array()[9] ^ rhs.as_array()[9],
self.as_array()[10] ^ rhs.as_array()[10],
self.as_array()[11] ^ rhs.as_array()[11],
self.as_array()[12] ^ rhs.as_array()[12],
self.as_array()[13] ^ rhs.as_array()[13],
self.as_array()[14] ^ rhs.as_array()[14],
self.as_array()[15] ^ rhs.as_array()[15],
])
Avx2
- VPXOR ymm, ymm, ymm
impl BitXorAssign<I16x16> for I16x16
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for I16x16
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
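Example
A minimal sketch of constant-time selection (assuming Choice comes from the subtle crate, which defines this trait):
let a = I16x16::from_array([1; 16]);
let b = I16x16::from_array([2; 16]);
// Choice::from(0) selects a; Choice::from(1) selects b.
let picked = I16x16::conditional_select(&a, &b, Choice::from(1));
assert_eq!(picked.as_array(), [2; 16]);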
impl ConstantTimeEq for I16x16
impl ExtendingCast<I8x16> for I16x16
fn extending_cast_from(vector: I8x16) -> I16x16
Scalar Equivalent:
I16x16::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
i16::from(vector.as_array()[8]),
i16::from(vector.as_array()[9]),
i16::from(vector.as_array()[10]),
i16::from(vector.as_array()[11]),
i16::from(vector.as_array()[12]),
i16::from(vector.as_array()[13]),
i16::from(vector.as_array()[14]),
i16::from(vector.as_array()[15]),
])
Avx2
- VPMOVSXBW ymm, xmm
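Example
A minimal sketch of the sign extension (assuming I8x16 exposes the same from_array constructor and the ExtendingCast trait is in scope):
let bytes = I8x16::from_array([-1; 16]);
// Sign extension preserves the value: each -1i8 widens to -1i16.
let widened = I16x16::extending_cast_from(bytes);
assert_eq!(widened.as_array(), [-1i16; 16]);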
impl From<[I16x8; 2]> for I16x16
impl From<I16x16> for [I16x8; 2]
impl From<I16x8> for I16x16
fn from(vector: I16x8) -> I16x16
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 16];
out[0..8].copy_from_slice(&vector.as_array());
I16x16::from(out)
Avx2
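Example
A minimal sketch of the zeroing behavior (assuming I16x8 exposes the same from_array constructor):
let half = I16x8::from_array([7; 8]);
let full = I16x16::from(half);
let arr = full.as_array();
// The lower eight lanes carry the source; the upper eight are zeroed.
assert_eq!(&arr[..8], &[7; 8]);
assert_eq!(&arr[8..], &[0; 8]);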
impl From<I8x16> for I16x16
fn from(vector: I8x16) -> I16x16
Scalar Equivalent:
I16x16::from([
i16::from(vector.as_array()[0]),
i16::from(vector.as_array()[1]),
i16::from(vector.as_array()[2]),
i16::from(vector.as_array()[3]),
i16::from(vector.as_array()[4]),
i16::from(vector.as_array()[5]),
i16::from(vector.as_array()[6]),
i16::from(vector.as_array()[7]),
i16::from(vector.as_array()[8]),
i16::from(vector.as_array()[9]),
i16::from(vector.as_array()[10]),
i16::from(vector.as_array()[11]),
i16::from(vector.as_array()[12]),
i16::from(vector.as_array()[13]),
i16::from(vector.as_array()[14]),
i16::from(vector.as_array()[15]),
])
Avx2
- VPMOVSXBW ymm, xmm
impl Shl<I16x16> for I16x16
fn shl(self, amount: I16x16) -> I16x16
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 16 || amm < 0 {
0
} else {
*x << amm
};
}
I16x16::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
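Example
A minimal sketch of the out-of-range behavior (assuming vectoreyes is in scope):
let v = I16x16::from_array([1; 16]);
let mut amounts = [1i16; 16];
// A shift amount of 16 or more (or below zero) zeroes that lane.
amounts[0] = 16;
let shifted = v << I16x16::from_array(amounts);
assert_eq!(shifted.as_array()[0], 0);
assert_eq!(shifted.as_array()[1], 2);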
impl Shl<u64> for I16x16
fn shl(self, amount: u64) -> I16x16
Scalar Equivalent:
if amount >= 16 {
I16x16::ZERO
} else {
I16x16::from([
self.as_array()[0] << amount,
self.as_array()[1] << amount,
self.as_array()[2] << amount,
self.as_array()[3] << amount,
self.as_array()[4] << amount,
self.as_array()[5] << amount,
self.as_array()[6] << amount,
self.as_array()[7] << amount,
self.as_array()[8] << amount,
self.as_array()[9] << amount,
self.as_array()[10] << amount,
self.as_array()[11] << amount,
self.as_array()[12] << amount,
self.as_array()[13] << amount,
self.as_array()[14] << amount,
self.as_array()[15] << amount,
])
}
Avx2
- VPSLLW ymm, ymm, xmm
- Instruction sequence.
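Example
A minimal sketch (assuming vectoreyes is in scope):
let v = I16x16::from_array([1; 16]);
assert_eq!((v << 3u64).as_array(), [8; 16]);
// Shifting by 16 or more clears every lane.
assert_eq!((v << 16u64).as_array(), [0; 16]);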
impl ShlAssign<I16x16> for I16x16
fn shl_assign(&mut self, amount: I16x16)
Performs the <<= operation.
impl ShlAssign<u64> for I16x16
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<I16x16> for I16x16
fn shr(self, amount: I16x16) -> I16x16
Scalar Equivalent:
let mut out = self.as_array();
for (x, amm) in out.iter_mut().zip(amount.as_array().iter().copied()) {
*x = if amm >= 16 || amm < 0 {
if *x < 0 { -1 } else { 0 }
} else {
*x >> amm
};
}
I16x16::from(out)
Avx2
WARNING: this implementation is a polyfill which executes the scalar implementation.
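Example
A minimal sketch of the arithmetic-shift semantics (assuming vectoreyes is in scope):
let v = I16x16::from_array([-4; 16]);
// The sign bit is replicated, so -4 >> 1 == -2 in every lane.
let shifted = v >> I16x16::from_array([1; 16]);
assert_eq!(shifted.as_array(), [-2; 16]);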
impl Shr<u64> for I16x16
fn shr(self, amount: u64) -> I16x16
Scalar Equivalent:
if amount >= 16 {
let mut out = self.as_array();
for x in out.iter_mut() {
*x = if *x < 0 { -1 } else { 0 };
}
I16x16::from(out)
} else {
I16x16::from([
self.as_array()[0] >> amount,
self.as_array()[1] >> amount,
self.as_array()[2] >> amount,
self.as_array()[3] >> amount,
self.as_array()[4] >> amount,
self.as_array()[5] >> amount,
self.as_array()[6] >> amount,
self.as_array()[7] >> amount,
self.as_array()[8] >> amount,
self.as_array()[9] >> amount,
self.as_array()[10] >> amount,
self.as_array()[11] >> amount,
self.as_array()[12] >> amount,
self.as_array()[13] >> amount,
self.as_array()[14] >> amount,
self.as_array()[15] >> amount,
])
}
Avx2
- VPSRAW ymm, ymm, xmm
- Instruction sequence.
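Example
A minimal sketch (assuming vectoreyes is in scope):
let v = I16x16::from_array([-8; 16]);
assert_eq!((v >> 2u64).as_array(), [-2; 16]);
// Negative lanes shifted by 16 or more saturate to -1 (all sign bits).
assert_eq!((v >> 16u64).as_array(), [-1; 16]);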
impl ShrAssign<I16x16> for I16x16
fn shr_assign(&mut self, amount: I16x16)
Performs the >>= operation.
impl ShrAssign<u64> for I16x16
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for I16x16
fn set_lo(scalar: i16) -> I16x16
Scalar Equivalent:
let mut out = [0; 16];
out[0] = scalar;
I16x16::from(out)
Avx2
- Instruction sequence.
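Example
A minimal sketch (assuming the SimdBase trait is in scope):
let v = I16x16::set_lo(42);
let arr = v.as_array();
// Only the lowest lane is set; all others are zero.
assert_eq!(arr[0], 42);
assert_eq!(&arr[1..], &[0; 15]);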
fn broadcast_lo(vector: I16x8) -> I16x16
fn cmp_eq(&self, other: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] == other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] == other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] == other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] == other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] == other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] == other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] == other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] == other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] == other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] == other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] == other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] == other.as_array()[15] { -1 } else { 0 },
])
Avx2
- VPCMPEQW ymm, ymm, ymm
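Example
A minimal sketch of the mask encoding (assuming the SimdBase trait is in scope):
let a = I16x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let b = I16x16::ZERO;
// Equal lanes become all-ones (-1); unequal lanes become 0.
let mask = a.cmp_eq(b);
assert_eq!(mask.as_array()[0], -1);
assert_eq!(mask.as_array()[1], 0);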
fn and_not(&self, other: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
self.as_array()[4] & (!other.as_array()[4]),
self.as_array()[5] & (!other.as_array()[5]),
self.as_array()[6] & (!other.as_array()[6]),
self.as_array()[7] & (!other.as_array()[7]),
self.as_array()[8] & (!other.as_array()[8]),
self.as_array()[9] & (!other.as_array()[9]),
self.as_array()[10] & (!other.as_array()[10]),
self.as_array()[11] & (!other.as_array()[11]),
self.as_array()[12] & (!other.as_array()[12]),
self.as_array()[13] & (!other.as_array()[13]),
self.as_array()[14] & (!other.as_array()[14]),
self.as_array()[15] & (!other.as_array()[15]),
])
Avx2
- VPANDN ymm, ymm, ymm
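Example
A minimal sketch (assuming the SimdBase trait is in scope):
let bits = I16x16::from_array([0b1111; 16]);
let mask = I16x16::from_array([0b0101; 16]);
// and_not computes self & !other, clearing the bits set in mask.
assert_eq!(bits.and_not(mask).as_array(), [0b1010; 16]);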
fn cmp_gt(&self, other: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
if self.as_array()[4] > other.as_array()[4] { -1 } else { 0 },
if self.as_array()[5] > other.as_array()[5] { -1 } else { 0 },
if self.as_array()[6] > other.as_array()[6] { -1 } else { 0 },
if self.as_array()[7] > other.as_array()[7] { -1 } else { 0 },
if self.as_array()[8] > other.as_array()[8] { -1 } else { 0 },
if self.as_array()[9] > other.as_array()[9] { -1 } else { 0 },
if self.as_array()[10] > other.as_array()[10] { -1 } else { 0 },
if self.as_array()[11] > other.as_array()[11] { -1 } else { 0 },
if self.as_array()[12] > other.as_array()[12] { -1 } else { 0 },
if self.as_array()[13] > other.as_array()[13] { -1 } else { 0 },
if self.as_array()[14] > other.as_array()[14] { -1 } else { 0 },
if self.as_array()[15] > other.as_array()[15] { -1 } else { 0 },
])
Avx2
- VPCMPGTW ymm, ymm, ymm
fn shift_left<const BITS: usize>(&self) -> I16x16
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
I16x16::from(out)
Avx2
- VPSLLW ymm, ymm, imm8
fn shift_right<const BITS: usize>(&self) -> I16x16
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
I16x16::from(out)
Avx2
- VPSRAW ymm, ymm, imm8
fn unpack_lo(&self, other: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
// Lane# 1
self.as_array()[8],
other.as_array()[8],
self.as_array()[9],
other.as_array()[9],
self.as_array()[10],
other.as_array()[10],
self.as_array()[11],
other.as_array()[11],
])
Avx2
- VPUNPCKLWD ymm, ymm, ymm
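Example
A minimal sketch showing that the interleave works within each 128-bit lane, not across the whole vector (assuming the SimdBase trait is in scope):
let a = I16x16::from_array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
let b = I16x16::from_array([100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115]);
assert_eq!(
    a.unpack_lo(b).as_array(),
    [0, 100, 1, 101, 2, 102, 3, 103, 8, 108, 9, 109, 10, 110, 11, 111],
);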
fn unpack_hi(&self, other: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
// Lane# 0
self.as_array()[4],
other.as_array()[4],
self.as_array()[5],
other.as_array()[5],
self.as_array()[6],
other.as_array()[6],
self.as_array()[7],
other.as_array()[7],
// Lane# 1
self.as_array()[12],
other.as_array()[12],
self.as_array()[13],
other.as_array()[13],
self.as_array()[14],
other.as_array()[14],
self.as_array()[15],
other.as_array()[15],
])
Avx2
- VPUNPCKHWD ymm, ymm, ymm
fn max(&self, other: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
self.as_array()[4].max(other.as_array()[4]),
self.as_array()[5].max(other.as_array()[5]),
self.as_array()[6].max(other.as_array()[6]),
self.as_array()[7].max(other.as_array()[7]),
self.as_array()[8].max(other.as_array()[8]),
self.as_array()[9].max(other.as_array()[9]),
self.as_array()[10].max(other.as_array()[10]),
self.as_array()[11].max(other.as_array()[11]),
self.as_array()[12].max(other.as_array()[12]),
self.as_array()[13].max(other.as_array()[13]),
self.as_array()[14].max(other.as_array()[14]),
self.as_array()[15].max(other.as_array()[15]),
])
Avx2
- VPMAXSW ymm, ymm, ymm
fn min(&self, other: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
self.as_array()[4].min(other.as_array()[4]),
self.as_array()[5].min(other.as_array()[5]),
self.as_array()[6].min(other.as_array()[6]),
self.as_array()[7].min(other.as_array()[7]),
self.as_array()[8].min(other.as_array()[8]),
self.as_array()[9].min(other.as_array()[9]),
self.as_array()[10].min(other.as_array()[10]),
self.as_array()[11].min(other.as_array()[11]),
self.as_array()[12].min(other.as_array()[12]),
self.as_array()[13].min(other.as_array()[13]),
self.as_array()[14].min(other.as_array()[14]),
self.as_array()[15].min(other.as_array()[15]),
])
Avx2
- VPMINSW ymm, ymm, ymm
const ZERO: Self = _
type BroadcastLoInput = I16x8
impl Sub<I16x16> for I16x16
fn sub(self, rhs: I16x16) -> I16x16
Scalar Equivalent:
I16x16::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
self.as_array()[4].wrapping_sub(rhs.as_array()[4]),
self.as_array()[5].wrapping_sub(rhs.as_array()[5]),
self.as_array()[6].wrapping_sub(rhs.as_array()[6]),
self.as_array()[7].wrapping_sub(rhs.as_array()[7]),
self.as_array()[8].wrapping_sub(rhs.as_array()[8]),
self.as_array()[9].wrapping_sub(rhs.as_array()[9]),
self.as_array()[10].wrapping_sub(rhs.as_array()[10]),
self.as_array()[11].wrapping_sub(rhs.as_array()[11]),
self.as_array()[12].wrapping_sub(rhs.as_array()[12]),
self.as_array()[13].wrapping_sub(rhs.as_array()[13]),
self.as_array()[14].wrapping_sub(rhs.as_array()[14]),
self.as_array()[15].wrapping_sub(rhs.as_array()[15]),
])
Avx2
- VPSUBW ymm, ymm, ymm
impl SubAssign<I16x16> for I16x16
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for I16x16
impl Eq for I16x16
impl Pod for I16x16
Auto Trait Implementations
impl RefUnwindSafe for I16x16
impl Send for I16x16
impl Sync for I16x16
impl Unpin for I16x16
impl UnwindSafe for I16x16
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CheckedBitPattern for T
where
    T: AnyBitPattern,
type Bits = T
Self must have the same layout as the specified Bits, except for the possible invalid bit patterns being checked during is_valid_bit_pattern.