use crate::{
mutability::Mutability,
order::{
BitOrder,
Lsb0,
},
ptr::{
BitPtr,
BitSpan,
},
store::BitStore,
};
use core::{
any::TypeId,
fmt::{
self,
Debug,
Formatter,
Pointer,
},
hash::{
Hash,
Hasher,
},
iter::FusedIterator,
ops::{
Bound,
Range,
RangeBounds,
},
};
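/// Equivalent to `Range<BitPtr<M, O, T>>`.
///
/// As with `Range`, this is a half-open span: `start` is included and `end`
/// is excluded. It exists as a distinct type because `Range` can only
/// iterate when its endpoints implement the unstable `core::iter::Step`
/// trait, which foreign crates cannot implement for their own types.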
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
#[cfg_attr(
not(any(target_pointer_width = "32", target_pointer_width = "64")),
repr(C)
)]
pub struct BitPtrRange<M, O = Lsb0, T = usize>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
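	/// The lower, inclusive, bound of the range.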
pub start: BitPtr<M, O, T>,
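	/// The higher, exclusive, bound of the range.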
pub end: BitPtr<M, O, T>,
}
impl<M, O, T> BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
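	/// The canonical empty range: both endpoints are the dangling pointer.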
pub const EMPTY: Self = Self {
start: BitPtr::DANGLING,
end: BitPtr::DANGLING,
};
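	/// Copies the range's two endpoint bit-pointers out as a pair.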
#[inline]
#[cfg(not(tarpaulin_include))]
pub fn raw_parts(&self) -> (BitPtr<M, O, T>, BitPtr<M, O, T>) {
(self.start, self.end)
}
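	/// Converts the structure into an ordinary `Range` of bit-pointers.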
#[inline(always)]
#[cfg(not(tarpaulin_include))]
pub fn into_range(self) -> Range<BitPtr<M, O, T>> {
self.start .. self.end
}
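	/// Tests whether the range contains no bits.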
#[inline]
pub fn is_empty(&self) -> bool {
self.start == self.end
}
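	/// Tests whether a bit-pointer lies within the half-open span
	/// `[start, end)`. The query may differ from the range in its
	/// mutability and storage parameters.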
#[inline]
pub fn contains<M2, T2>(&self, pointer: &BitPtr<M2, O, T2>) -> bool
where
M2: Mutability,
T2: BitStore,
{
self.start <= *pointer && *pointer < self.end
}
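	/// Converts the pointer range into a span descriptor covering the same
	/// bits.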
#[inline]
pub(crate) fn into_bitspan(self) -> BitSpan<M, O, T> {
unsafe { self.start.span_unchecked(self.len()) }
}
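	/// Snapshots the start pointer, then advances it by one bit.
	///
	/// Callers must ensure the range is non-empty before invoking, as this
	/// performs unchecked pointer arithmetic.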
#[inline]
fn take_front(&mut self) -> BitPtr<M, O, T> {
let start = self.start;
self.start = unsafe { start.add(1) };
start
}
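	/// Retreats the end pointer by one bit and returns the new endpoint.
	///
	/// Callers must ensure the range is non-empty before invoking, as this
	/// performs unchecked pointer arithmetic.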
#[inline]
fn take_back(&mut self) -> BitPtr<M, O, T> {
let prev = unsafe { self.end.sub(1) };
self.end = prev;
prev
}
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> Clone for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
#[inline(always)]
fn clone(&self) -> Self {
Self { ..*self }
}
}
impl<M, O, T> Eq for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
}
#[cfg(not(tarpaulin_include))]
impl<M1, M2, O, T1, T2> PartialEq<BitPtrRange<M2, O, T2>>
for BitPtrRange<M1, O, T1>
where
M1: Mutability,
M2: Mutability,
O: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline(always)]
fn eq(&self, other: &BitPtrRange<M2, O, T2>) -> bool {
if TypeId::of::<T1::Mem>() != TypeId::of::<T2::Mem>() {
return false;
}
self.start == other.start && self.end == other.end
}
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> Default for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
#[inline(always)]
fn default() -> Self {
Self::EMPTY
}
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> From<Range<BitPtr<M, O, T>>> for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
#[inline(always)]
fn from(Range { start, end }: Range<BitPtr<M, O, T>>) -> Self {
Self { start, end }
}
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> From<BitPtrRange<M, O, T>> for Range<BitPtr<M, O, T>>
where
	M: Mutability,
	O: BitOrder,
	T: BitStore,
{
	#[inline(always)]
	fn from(range: BitPtrRange<M, O, T>) -> Self {
		range.into_range()
	}
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> Debug for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let (start, end) = self.raw_parts();
Pointer::fmt(&start, fmt)?;
write!(fmt, "{0}..{0}", if fmt.alternate() { " " } else { "" })?;
Pointer::fmt(&end, fmt)
}
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> Hash for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
#[inline]
fn hash<H>(&self, state: &mut H)
where H: Hasher {
self.start.hash(state);
self.end.hash(state);
}
}
impl<M, O, T> Iterator for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
type Item = BitPtr<M, O, T>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
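		// Call the inherent `is_empty` by qualified path so that it cannot
		// collide with the unstable `ExactSizeIterator::is_empty`.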
if Self::is_empty(&*self) {
return None;
}
Some(self.take_front())
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
if n >= self.len() {
self.start = self.end;
return None;
}
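		// Skip `n` bits, then yield the `n + 1`th and advance past it.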
self.start = unsafe { self.start.add(n) };
Some(self.take_front())
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
#[inline(always)]
#[cfg(not(tarpaulin_include))]
fn count(self) -> usize {
self.len()
}
#[inline(always)]
#[cfg(not(tarpaulin_include))]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
}
impl<M, O, T> DoubleEndedIterator for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
if Self::is_empty(&*self) {
return None;
}
Some(self.take_back())
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
if n >= self.len() {
self.end = self.start;
return None;
}
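		// Drop `n` bits from the back; the next one down becomes both the
		// new exclusive endpoint and the yielded item.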
let out = unsafe { self.end.sub(n.wrapping_add(1)) };
self.end = out;
Some(out)
}
}
impl<M, O, T> ExactSizeIterator for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
#[cfg_attr(not(tarpaulin_include), inline(always))]
fn len(&self) -> usize {
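		// `offset_from` measures the distance between the endpoints in
		// bits; the invariant `start <= end` makes the cast lossless.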
(unsafe { self.end.offset_from(self.start) }) as usize
}
}
impl<M, O, T> FusedIterator for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> RangeBounds<BitPtr<M, O, T>> for BitPtrRange<M, O, T>
where
M: Mutability,
O: BitOrder,
T: BitStore,
{
#[inline(always)]
fn start_bound(&self) -> Bound<&BitPtr<M, O, T>> {
Bound::Included(&self.start)
}
#[inline(always)]
fn end_bound(&self) -> Bound<&BitPtr<M, O, T>> {
Bound::Excluded(&self.end)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
mutability::Const,
order::Lsb0,
};
use core::mem::size_of;
#[test]
fn assert_size() {
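		// A `BitPtr` is a packed one-word address plus a one-byte index;
		// two of them, padded back to word alignment, fit in three words.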
assert!(
size_of::<BitPtrRange<Const, Lsb0, u8>>() <= 3 * size_of::<usize>()
);
}
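	// A minimal behavioral sketch of forward iteration. It assumes the
	// public `BitPtr::from_ref` constructor in addition to the `add` used
	// by the implementation above; illustrative, not exhaustive.
	#[test]
	fn iterate_forward() {
		let datum = 0u8;
		let base = BitPtr::<Const, Lsb0, u8>::from_ref(&datum);
		let end = unsafe { base.add(2) };
		let mut range = BitPtrRange::from(base .. end);
		assert_eq!(range.len(), 2);
		assert_eq!(range.next(), Some(base));
		assert_eq!(range.next(), Some(unsafe { base.add(1) }));
		assert!(range.next().is_none());
		assert!(range.is_empty());
	}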
}