Struct vectoreyes::I32x4
#[repr(transparent)]pub struct I32x4(_);
[i32; 4] as a vector.
Implementations
Trait Implementations
impl Add<I32x4> for I32x4
fn add(self, rhs: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
self.as_array()[0].wrapping_add(rhs.as_array()[0]),
self.as_array()[1].wrapping_add(rhs.as_array()[1]),
self.as_array()[2].wrapping_add(rhs.as_array()[2]),
self.as_array()[3].wrapping_add(rhs.as_array()[3]),
])
Avx2
- PADDD xmm, xmm
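Example (a minimal usage sketch, not taken from the crate's own docs; it assumes only the From<[i32; 4]> and as_array APIs shown on this page):
use vectoreyes::I32x4;

let a = I32x4::from([1, 2, 3, i32::MAX]);
let b = I32x4::from([10, 20, 30, 1]);
// Lane-wise addition wraps on overflow, matching the wrapping_add scalar equivalent.
assert_eq!((a + b).as_array(), [11, 22, 33, i32::MIN]);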
impl AddAssign<I32x4> for I32x4
fn add_assign(&mut self, rhs: Self)
Performs the += operation.
impl BitAnd<I32x4> for I32x4
impl BitAndAssign<I32x4> for I32x4
fn bitand_assign(&mut self, rhs: Self)
Performs the &= operation.
impl BitOr<I32x4> for I32x4
impl BitOrAssign<I32x4> for I32x4
fn bitor_assign(&mut self, rhs: Self)
Performs the |= operation.
impl BitXor<I32x4> for I32x4
impl BitXorAssign<I32x4> for I32x4
fn bitxor_assign(&mut self, rhs: Self)
Performs the ^= operation.
impl ConditionallySelectable for I32x4
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self
fn conditional_assign(&mut self, other: &Self, choice: Choice)
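Example (a hedged sketch: ConditionallySelectable and Choice come from the subtle crate, which this impl is assumed to use):
use subtle::{Choice, ConditionallySelectable};
use vectoreyes::I32x4;

let a = I32x4::from([1, 2, 3, 4]);
let b = I32x4::from([5, 6, 7, 8]);
// Choice::from(1) selects b in constant time; Choice::from(0) would keep a.
let picked = I32x4::conditional_select(&a, &b, Choice::from(1u8));
assert_eq!(picked.as_array(), [5, 6, 7, 8]);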
impl ConstantTimeEq for I32x4
impl ExtendingCast<I16x8> for I32x4
fn extending_cast_from(vector: I16x8) -> I32x4
Scalar Equivalent:
I32x4::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
])
Avx2
- PMOVSXWD xmm, xmm
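Example (a minimal sketch of the sign-extending cast above; it assumes I16x8 supports From<[i16; 8]> analogously to I32x4):
use vectoreyes::{ExtendingCast, I16x8, I32x4};

let narrow = I16x8::from([-1i16, 2, -3, 4, 100, 200, 300, 400]);
// Only the low four i16 lanes are sign-extended into i32 lanes.
let wide = I32x4::extending_cast_from(narrow);
assert_eq!(wide.as_array(), [-1, 2, -3, 4]);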
impl ExtendingCast<I32x4> for I64x2
fn extending_cast_from(vector: I32x4) -> I64x2
Scalar Equivalent:
I64x2::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
])
Avx2
- PMOVSXDQ xmm, xmm
impl ExtendingCast<I32x4> for I64x4
fn extending_cast_from(vector: I32x4) -> I64x4
Scalar Equivalent:
I64x4::from([
i64::from(vector.as_array()[0]),
i64::from(vector.as_array()[1]),
i64::from(vector.as_array()[2]),
i64::from(vector.as_array()[3]),
])
Avx2
- VPMOVSXDQ ymm, xmm
impl ExtendingCast<I8x16> for I32x4
fn extending_cast_from(vector: I8x16) -> I32x4
Scalar Equivalent:
I32x4::from([
i32::from(vector.as_array()[0]),
i32::from(vector.as_array()[1]),
i32::from(vector.as_array()[2]),
i32::from(vector.as_array()[3]),
])
Avx2
- PMOVSXBD xmm, xmm
impl From<I32x4> for I32x8
fn from(vector: I32x4) -> I32x8
NOTE: this will zero the upper bits of the destination. Other intrinsics are more efficient, but leave the upper bits undefined. At present, these more efficient intrinsics are not exposed.
Scalar Equivalent:
let mut out = [0; 8];
out[0..4].copy_from_slice(&vector.as_array());
I32x8::from(out)
Avx2
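Example (a minimal sketch illustrating the zeroing behavior noted above; I32x8::as_array is assumed to exist analogously to I32x4::as_array):
use vectoreyes::{I32x4, I32x8};

let narrow = I32x4::from([1, 2, 3, 4]);
// The low four lanes are copied and the upper four lanes are zeroed.
let wide = I32x8::from(narrow);
assert_eq!(wide.as_array(), [1, 2, 3, 4, 0, 0, 0, 0]);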
impl From<I32x4> for I64x4
impl Shl<I32x4> for I32x4
impl Shl<u64> for I32x4
impl ShlAssign<I32x4> for I32x4
fn shl_assign(&mut self, amount: I32x4)
Performs the <<= operation.
impl ShlAssign<u64> for I32x4
fn shl_assign(&mut self, amount: u64)
Performs the <<= operation.
impl Shr<I32x4> for I32x4
impl Shr<u64> for I32x4
fn shr(self, amount: u64) -> I32x4
Scalar Equivalent:
if amount >= 32 {
    let mut out = self.as_array();
    for x in out.iter_mut() {
        *x = if *x < 0 { -1 } else { 0 };
    }
    I32x4::from(out)
} else {
    I32x4::from([
        self.as_array()[0] >> amount,
        self.as_array()[1] >> amount,
        self.as_array()[2] >> amount,
        self.as_array()[3] >> amount,
    ])
}
Avx2
- Instruction sequence.
- PSRAD xmm, xmm
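Example (a minimal sketch of the arithmetic-shift behavior above):
use vectoreyes::I32x4;

let v = I32x4::from([-8, 8, -1, 16]);
// The shift is arithmetic, so negative lanes fill with the sign bit.
assert_eq!((v >> 2u64).as_array(), [-2, 2, -1, 4]);
// Shifting by 32 or more saturates each lane to 0 or -1 depending on its sign.
assert_eq!((v >> 40u64).as_array(), [-1, 0, -1, 0]);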
impl ShrAssign<I32x4> for I32x4
fn shr_assign(&mut self, amount: I32x4)
Performs the >>= operation.
impl ShrAssign<u64> for I32x4
fn shr_assign(&mut self, amount: u64)
Performs the >>= operation.
impl SimdBase for I32x4
fn set_lo(scalar: i32) -> I32x4
Scalar Equivalent:
let mut out = [0; 4];
out[0] = scalar;
I32x4::from(out)
Avx2
- Instruction sequence.
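Example (a minimal sketch; only SimdBase::set_lo as declared above is assumed):
use vectoreyes::{I32x4, SimdBase};

// The scalar lands in lane 0 and every other lane is zeroed.
assert_eq!(I32x4::set_lo(42).as_array(), [42, 0, 0, 0]);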
fn broadcast_lo(vector: I32x4) -> I32x4
fn cmp_eq(&self, other: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
if self.as_array()[0] == other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] == other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] == other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] == other.as_array()[3] { -1 } else { 0 },
])
Avx2
- PCMPEQD xmm, xmm
fn and_not(&self, other: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
self.as_array()[0] & (!other.as_array()[0]),
self.as_array()[1] & (!other.as_array()[1]),
self.as_array()[2] & (!other.as_array()[2]),
self.as_array()[3] & (!other.as_array()[3]),
])
Avx2
- PANDN xmm, xmm
fn cmp_gt(&self, other: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
if self.as_array()[0] > other.as_array()[0] { -1 } else { 0 },
if self.as_array()[1] > other.as_array()[1] { -1 } else { 0 },
if self.as_array()[2] > other.as_array()[2] { -1 } else { 0 },
if self.as_array()[3] > other.as_array()[3] { -1 } else { 0 },
])
Avx2
- PCMPGTD xmm, xmm
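Example (a minimal sketch covering cmp_eq and cmp_gt above; each comparison produces an all-ones (-1) or all-zero lane mask):
use vectoreyes::{I32x4, SimdBase};

let a = I32x4::from([1, 5, 3, 7]);
let b = I32x4::from([1, 2, 3, 9]);
assert_eq!(a.cmp_eq(b).as_array(), [-1, 0, -1, 0]);
assert_eq!(a.cmp_gt(b).as_array(), [0, -1, 0, 0]);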
fn shift_left<const BITS: usize>(&self) -> I32x4
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x <<= BITS;
}
I32x4::from(out)
Avx2
- PSLLD xmm, imm8
fn shift_right<const BITS: usize>(&self) -> I32x4
Scalar Equivalent:
let mut out = self.as_array();
for x in out.iter_mut() {
*x >>= BITS;
}
I32x4::from(out)
Avx2
- PSRAD xmm, imm8
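Example (a minimal sketch of the const-generic shifts above; shift_right is arithmetic, as the PSRAD encoding indicates):
use vectoreyes::{I32x4, SimdBase};

let v = I32x4::from([1, -4, 8, 16]);
assert_eq!(v.shift_left::<2>().as_array(), [4, -16, 32, 64]);
assert_eq!(v.shift_right::<2>().as_array(), [0, -1, 2, 4]);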
fn unpack_lo(&self, other: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
// Lane# 0
self.as_array()[0],
other.as_array()[0],
self.as_array()[1],
other.as_array()[1],
])
Avx2
- PUNPCKLDQ xmm, xmm
fn unpack_hi(&self, other: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
// Lane# 0
self.as_array()[2],
other.as_array()[2],
self.as_array()[3],
other.as_array()[3],
])
Avx2
- PUNPCKHDQ xmm, xmm
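Example (a minimal sketch of the interleaving above):
use vectoreyes::{I32x4, SimdBase};

let a = I32x4::from([0, 1, 2, 3]);
let b = I32x4::from([10, 11, 12, 13]);
// unpack_lo interleaves the low halves, unpack_hi the high halves.
assert_eq!(a.unpack_lo(b).as_array(), [0, 10, 1, 11]);
assert_eq!(a.unpack_hi(b).as_array(), [2, 12, 3, 13]);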
fn max(&self, other: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
self.as_array()[0].max(other.as_array()[0]),
self.as_array()[1].max(other.as_array()[1]),
self.as_array()[2].max(other.as_array()[2]),
self.as_array()[3].max(other.as_array()[3]),
])
Avx2
- PMAXSD xmm, xmm
fn min(&self, other: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
self.as_array()[0].min(other.as_array()[0]),
self.as_array()[1].min(other.as_array()[1]),
self.as_array()[2].min(other.as_array()[2]),
self.as_array()[3].min(other.as_array()[3]),
])
Avx2
- PMINSD xmm, xmm
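Example (a minimal sketch of the lane-wise signed min and max above):
use vectoreyes::{I32x4, SimdBase};

let a = I32x4::from([-1, 5, 3, 7]);
let b = I32x4::from([2, 2, 3, 9]);
assert_eq!(a.max(b).as_array(), [2, 5, 3, 9]);
assert_eq!(a.min(b).as_array(), [-1, 2, 3, 7]);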
const ZERO: Self = _
type BroadcastLoInput = I32x4
impl SimdBase32 for I32x4
impl SimdBase4x for I32x4
fn blend<const B3: bool, const B2: bool, const B1: bool, const B0: bool>(
    &self,
    if_true: I32x4
) -> I32x4
Scalar Equivalent:
I32x4::from([
(if B0 { if_true } else { *self }).as_array()[0],
(if B1 { if_true } else { *self }).as_array()[1],
(if B2 { if_true } else { *self }).as_array()[2],
(if B3 { if_true } else { *self }).as_array()[3],
])
Avx2
- VPBLENDD xmm, xmm, xmm, imm8
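Example (a minimal sketch; note the const parameters run B3..B0, i.e. highest lane first):
use vectoreyes::{I32x4, SimdBase4x};

let zeros = I32x4::from([0, 0, 0, 0]);
let ones = I32x4::from([1, 1, 1, 1]);
// Take lanes 0 and 3 from if_true (`ones`), lanes 1 and 2 from self (`zeros`).
let mixed = zeros.blend::<true, false, false, true>(ones);
assert_eq!(mixed.as_array(), [1, 0, 0, 1]);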
impl SimdBaseGatherable<I32x4> for I32x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERDD xmm, vm32x, xmm
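Example (a hedged sketch of an in-bounds gather from a slice, using only the signature above):
use vectoreyes::{I32x4, SimdBaseGatherable};

let table: [i32; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
let idx = I32x4::from([7, 0, 3, 3]);
// SAFETY: all indices are in bounds for `table`, so each base + index is readable.
let gathered = unsafe { I32x4::gather(table.as_ptr(), idx) };
assert_eq!(gathered.as_array(), [17, 10, 13, 13]);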
unsafe fn gather_masked(
    base: *const i32,
    indices: I32x4,
    mask: I32x4,
    src: I32x4
) -> I32x4
Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERDD xmm, vm32x, xmm
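Example (a hedged sketch: lanes whose mask high bit is clear keep the corresponding value from src instead of loading from memory):
use vectoreyes::{I32x4, SimdBaseGatherable};

let table: [i32; 4] = [10, 11, 12, 13];
let idx = I32x4::from([0, 1, 2, 3]);
let mask = I32x4::from([-1, 0, -1, 0]); // high bit set in lanes 0 and 2
let src = I32x4::from([100, 200, 300, 400]);
// SAFETY: all indices are in bounds for `table`.
let out = unsafe { I32x4::gather_masked(table.as_ptr(), idx, mask, src) };
assert_eq!(out.as_array(), [10, 200, 12, 400]);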
impl SimdBaseGatherable<I32x4> for I64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i64, indices: I32x4) -> I64x4
Scalar Equivalent:
I64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERDQ ymm, vm32x, ymm
unsafe fn gather_masked(
    base: *const i64,
    indices: I32x4,
    mask: I64x4,
    src: I64x4
) -> I64x4
Scalar Equivalent:
I64x4::from([
if ((mask.as_array()[0] as u64) >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u64) >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u64) >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u64) >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERDQ ymm, vm32x, ymm
impl SimdBaseGatherable<I32x4> for U32x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u32, indices: I32x4) -> U32x4
Scalar Equivalent:
U32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERDD xmm, vm32x, xmm
unsafe fn gather_masked(
    base: *const u32,
    indices: I32x4,
    mask: U32x4,
    src: U32x4
) -> U32x4
Scalar Equivalent:
U32x4::from([
if (mask.as_array()[0] >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERDD xmm, vm32x, xmm
impl SimdBaseGatherable<I32x4> for U64x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const u64, indices: I32x4) -> U64x4
Scalar Equivalent:
U64x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERDQ ymm, vm32x, ymm
unsafe fn gather_masked(
    base: *const u64,
    indices: I32x4,
    mask: U64x4,
    src: U64x4
) -> U64x4
Scalar Equivalent:
U64x4::from([
if (mask.as_array()[0] >> 63) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if (mask.as_array()[1] >> 63) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if (mask.as_array()[2] >> 63) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if (mask.as_array()[3] >> 63) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERDQ ymm, vm32x, ymm
impl SimdBaseGatherable<I64x4> for I32x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: I64x4) -> I32x4
Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const i32,
    indices: I64x4,
    mask: I32x4,
    src: I32x4
) -> I32x4
Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
impl SimdBaseGatherable<U64x4> for I32x4
Safety
base does not need to be aligned. For all i, base + indices[i] must meet the safety requirements of std::ptr::read_unaligned.
unsafe fn gather(base: *const i32, indices: U64x4) -> I32x4
Scalar Equivalent:
I32x4::from([
base.offset(indices.as_array()[0] as isize).read_unaligned(),
base.offset(indices.as_array()[1] as isize).read_unaligned(),
base.offset(indices.as_array()[2] as isize).read_unaligned(),
base.offset(indices.as_array()[3] as isize).read_unaligned(),
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
unsafe fn gather_masked(
    base: *const i32,
    indices: U64x4,
    mask: I32x4,
    src: I32x4
) -> I32x4
Scalar Equivalent:
I32x4::from([
if ((mask.as_array()[0] as u32) >> 31) == 1 {
base.offset(indices.as_array()[0] as isize).read_unaligned()
} else {
src.as_array()[0]
},
if ((mask.as_array()[1] as u32) >> 31) == 1 {
base.offset(indices.as_array()[1] as isize).read_unaligned()
} else {
src.as_array()[1]
},
if ((mask.as_array()[2] as u32) >> 31) == 1 {
base.offset(indices.as_array()[2] as isize).read_unaligned()
} else {
src.as_array()[2]
},
if ((mask.as_array()[3] as u32) >> 31) == 1 {
base.offset(indices.as_array()[3] as isize).read_unaligned()
} else {
src.as_array()[3]
},
])
Avx2
- VPGATHERQD xmm, vm64y, xmm
impl Sub<I32x4> for I32x4
fn sub(self, rhs: I32x4) -> I32x4
Scalar Equivalent:
I32x4::from([
self.as_array()[0].wrapping_sub(rhs.as_array()[0]),
self.as_array()[1].wrapping_sub(rhs.as_array()[1]),
self.as_array()[2].wrapping_sub(rhs.as_array()[2]),
self.as_array()[3].wrapping_sub(rhs.as_array()[3]),
])
Avx2
- PSUBD xmm, xmm
impl SubAssign<I32x4> for I32x4
fn sub_assign(&mut self, rhs: Self)
Performs the -= operation.
impl Copy for I32x4
impl Eq for I32x4
impl Pod for I32x4
Auto Trait Implementations
impl RefUnwindSafe for I32x4
impl Send for I32x4
impl Sync for I32x4
impl Unpin for I32x4
impl UnwindSafe for I32x4
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
impl<T> CheckedBitPattern for T where T: AnyBitPattern
type Bits = T
Self must have the same layout as the specified Bits except for the possible invalid bit patterns being checked during is_valid_bit_pattern.