瀏覽代碼

Adjust the semantics of details and enhance the abstraction of internal locks.

Chen Chengjun 1 年之前
父節點
當前提交
8212d351a9
共有 10 個文件被更改,包括 493 次插入和 330 次刪除
  1. 9 0
      Cargo.lock
  2. 5 0
      Cargo.toml
  3. 11 11
      src/cow.rs
  4. 137 118
      src/cursor.rs
  5. 27 27
      src/entry.rs
  6. 36 0
      src/lib.rs
  7. 8 8
      src/mark.rs
  8. 102 94
      src/node.rs
  9. 68 47
      src/test.rs
  10. 90 25
      src/xarray.rs

+ 9 - 0
Cargo.lock

@@ -2,6 +2,15 @@
 # It is not intended for manual editing.
 version = 3
 
+[[package]]
+name = "smallvec"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"
+
 [[package]]
 name = "xarray"
 version = "0.1.0"
+dependencies = [
+ "smallvec",
+]

+ 5 - 0
Cargo.toml

@@ -6,3 +6,8 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
+smallvec = "*"
+
+[features]
+default = []
+std = []

+ 11 - 11
src/cow.rs

@@ -2,38 +2,38 @@ use super::*;
 
 /// The COW trait provides the capability for Copy-On-Write (COW) behavior to structures related to XArray,
 /// allowing them to perform COW operations on their internal XEntries.
-pub(crate) trait Cow<I: ItemEntry> {
+pub(super) trait Cow<I: ItemEntry, L: XLock> {
     /// Check if the target entry that is about to be operated on need to perform COW.
     /// If the target entry is subject to a mutable operation and is shared with other XArrays,
     /// perform the COW and return the copied XEntry with `Some()`, else return `None`.
-    fn copy_if_shared(&self, entry: &XEntry<I>) -> Option<XEntry<I>>;
+    fn copy_if_shared(&self, entry: &XEntry<I, L>) -> Option<XEntry<I, L>>;
 }
 
-impl<I: ItemEntry> Cow<I> for XNode<I, ReadWrite> {
-    default fn copy_if_shared(&self, _entry: &XEntry<I>) -> Option<XEntry<I>> {
+impl<I: ItemEntry, L: XLock> Cow<I, L> for XNode<I, L, ReadWrite> {
+    default fn copy_if_shared(&self, _entry: &XEntry<I, L>) -> Option<XEntry<I, L>> {
         None
     }
 }
 
-impl<I: ItemEntry + Clone> Cow<I> for XNode<I, ReadWrite> {
-    fn copy_if_shared(&self, entry: &XEntry<I>) -> Option<XEntry<I>> {
+impl<I: ItemEntry + Clone, L: XLock> Cow<I, L> for XNode<I, L, ReadWrite> {
+    fn copy_if_shared(&self, entry: &XEntry<I, L>) -> Option<XEntry<I, L>> {
         copy_if_shared(entry)
     }
 }
 
-impl<I: ItemEntry, M: ValidMark> Cow<I> for XArray<I, M> {
-    default fn copy_if_shared(&self, _entry: &XEntry<I>) -> Option<XEntry<I>> {
+impl<I: ItemEntry, L: XLock, M: ValidMark> Cow<I, L> for XArray<I, L, M> {
+    default fn copy_if_shared(&self, _entry: &XEntry<I, L>) -> Option<XEntry<I, L>> {
         None
     }
 }
 
-impl<I: ItemEntry + Clone, M: ValidMark> Cow<I> for XArray<I, M> {
-    fn copy_if_shared(&self, entry: &XEntry<I>) -> Option<XEntry<I>> {
+impl<I: ItemEntry + Clone, L: XLock, M: ValidMark> Cow<I, L> for XArray<I, L, M> {
+    fn copy_if_shared(&self, entry: &XEntry<I, L>) -> Option<XEntry<I, L>> {
         copy_if_shared(entry)
     }
 }
 
-fn copy_if_shared<I: ItemEntry + Clone>(entry: &XEntry<I>) -> Option<XEntry<I>> {
+fn copy_if_shared<I: ItemEntry + Clone, L: XLock>(entry: &XEntry<I, L>) -> Option<XEntry<I, L>> {
     if entry.is_node() && entry.node_strong_count().unwrap() > 1 {
         let new_entry = deep_clone_node_entry(entry);
         Some(new_entry)

+ 137 - 118
src/cursor.rs

@@ -1,38 +1,37 @@
+use smallvec::SmallVec;
+
 use super::*;
-use std::marker::PhantomData;
-use std::ops::{Deref, DerefMut};
+use core::marker::PhantomData;
+use core::ops::{Deref, DerefMut};
 
 /// CursorState represents the current state of the cursor. Currently, there are two possible states:
 /// 1. inactive: the state where the cursor is not positioned on any node.
 /// 2. positioned on a node: this state includes information about the node the cursor is on,
 /// as well as the offset of the entry that needs to be operated on within the slots of the current node.
-enum CursorState<'a, I, Operation>
+enum CursorState<'a, I, L, Operation>
 where
     I: ItemEntry,
+    L: XLock,
 {
     Inactive,
     AtNode {
-        node: &'a XNode<I, Operation>,
+        node: &'a XNode<I, L, Operation>,
         operation_offset: u8,
     },
 }
 
-impl<'a, I: ItemEntry, Operation> CursorState<'a, I, Operation> {
+impl<'a, I: ItemEntry, L: XLock, Operation> CursorState<'a, I, L, Operation> {
     fn default() -> Self {
         Self::Inactive
     }
 
-    fn arrive_node(&mut self, node: &'a XNode<I, Operation>, operation_offset: u8) {
+    fn arrive_node(&mut self, node: &'a XNode<I, L, Operation>, operation_offset: u8) {
         *self = Self::AtNode {
             node,
             operation_offset,
         };
     }
 
-    fn is_inactive(&self) -> bool {
-        matches!(self, Self::Inactive)
-    }
-
     fn is_at_node(&self) -> bool {
         matches!(
             self,
@@ -43,7 +42,7 @@ impl<'a, I: ItemEntry, Operation> CursorState<'a, I, Operation> {
         )
     }
 
-    fn node_info(&self) -> Option<(&'a XNode<I, Operation>, u8)> {
+    fn node_info(&self) -> Option<(&'a XNode<I, L, Operation>, u8)> {
         if let Self::AtNode {
             node,
             operation_offset,
@@ -66,39 +65,40 @@ impl<'a, I: ItemEntry, Operation> CursorState<'a, I, Operation> {
 /// XEntry at the next index. If the Cursor perform reset or next and then have a target index that is not able to touch,
 /// the Cursor's state will also set to `Inactive`.
 ///
-/// Hence, at any given moment a cursor will be positioned on the XNode and be ready to operate its target XEntry.
+/// Hence, at any given moment when no operation is being performed, a cursor will be positioned on the XNode and be ready to operate its target XEntry.
 /// If not, it means that the cursor is not able to touch the target `XEntry`.
 ///
 /// The cursor will also record all nodes passed from the head node to the target position in `passed_node`,
 /// thereby assisting it in performing some operations that involve searching upwards.
 ///
 /// Multiple Cursors are allowed to operate on a single XArray at the same time.
-pub struct Cursor<'a, I, M>
+pub struct Cursor<'a, I, L, M>
 where
     I: ItemEntry,
+    L: XLock,
     M: ValidMark,
 {
     /// The `XArray` the cursor located in.
-    xa: &'a XArray<I, M>,
+    xa: &'a XArray<I, L, M>,
     /// The target index of the cursor in the belonged `XArray`.
     index: u64,
     /// Represents the current state of the cursor.
-    state: CursorState<'a, I, ReadOnly>,
+    state: CursorState<'a, I, L, ReadOnly>,
     /// Record add nodes passed from the head node to the target position.
-    /// The index is the layer of the recorded node.
-    passed_node: [Option<&'a XNode<I, ReadOnly>>; MAX_LAYER],
+    /// The index is the height of the recorded node.
+    ancestors: SmallVec<[&'a XNode<I, L, ReadOnly>; MAX_HEIGHT]>,
 
     _marker: PhantomData<I>,
 }
 
-impl<'a, I: ItemEntry, M: ValidMark> Cursor<'a, I, M> {
+impl<'a, I: ItemEntry, L: XLock, M: ValidMark> Cursor<'a, I, L, M> {
     /// Create an `Cursor` to perform read related operations on the `XArray`.
-    pub(crate) fn new(xa: &'a XArray<I, M>, index: u64) -> Self {
+    pub(super) fn new(xa: &'a XArray<I, L, M>, index: u64) -> Self {
         let mut cursor = Self {
             xa,
             index,
             state: CursorState::default(),
-            passed_node: [None; MAX_LAYER],
+            ancestors: SmallVec::new(),
             _marker: PhantomData,
         };
 
@@ -108,7 +108,7 @@ impl<'a, I: ItemEntry, M: ValidMark> Cursor<'a, I, M> {
 
     /// Move the `Cursor` to the `XNode`, and update the cursor's state based on its target index.
     /// Return a reference to the `XEntry` within the slots of the current XNode that needs to be operated on.
-    fn move_to(&mut self, node: &'a XNode<I, ReadOnly>) -> &'a XEntry<I> {
+    fn move_to(&mut self, node: &'a XNode<I, L, ReadOnly>) -> &'a XEntry<I, L> {
         let (current_entry, offset) = {
             let offset = node.entry_offset(self.index);
             let current_entry = node.ref_node_entry(offset);
@@ -144,20 +144,25 @@ impl<'a, I: ItemEntry, M: ValidMark> Cursor<'a, I, M> {
         operation_offset += 1;
         while operation_offset == SLOT_SIZE as u8 {
             operation_offset = current_node.offset_in_parent() + 1;
-            let parent_layer = (*current_node.layer() + 1) as usize;
-            self.passed_node[parent_layer - 1] = None;
-            current_node = self.passed_node[parent_layer].unwrap();
+            if let Some(node) = self.ancestors.pop() {
+                current_node = node;
+                continue;
+            }
+
+            operation_offset = 0;
+            break;
         }
         self.state.arrive_node(current_node, operation_offset);
 
-        while current_node.layer() != 0 {
+        while !current_node.is_leaf() {
             let next_entry = current_node.ref_node_entry(operation_offset);
             if !next_entry.is_node() {
                 self.init();
                 return;
             }
+
             let next_node = next_entry.as_node().unwrap();
-            self.passed_node[*next_node.layer() as usize] = Some(next_node);
+            self.ancestors.push(current_node);
             self.move_to(next_node);
             (current_node, operation_offset) = self.state.node_info().unwrap();
         }
@@ -181,7 +186,7 @@ impl<'a, I: ItemEntry, M: ValidMark> Cursor<'a, I, M> {
         if let Some((current_node, operation_offset)) = self.state.node_info() {
             let entry = current_node.ref_node_entry(operation_offset);
             if entry.is_item() {
-                return Some(unsafe { &*(entry as *const XEntry<I> as *const I) });
+                return Some(unsafe { &*(entry as *const XEntry<I, L> as *const I) });
             }
         }
         None
@@ -191,7 +196,7 @@ impl<'a, I: ItemEntry, M: ValidMark> Cursor<'a, I, M> {
     /// It then returns the reference to the `XEntry` stored in the slot corresponding to the target index.
     /// A target operated XEntry must be an item entry.
     /// If can not touch the target entry, the function will return `None`.
-    fn traverse_to_target(&mut self) -> Option<&'a XEntry<I>> {
+    fn traverse_to_target(&mut self) -> Option<&'a XEntry<I, L>> {
         if self.is_arrived() {
             let (current_node, operation_offset) = self.state.node_info().unwrap();
             return Some(current_node.ref_node_entry(operation_offset));
@@ -204,18 +209,15 @@ impl<'a, I: ItemEntry, M: ValidMark> Cursor<'a, I, M> {
         self.move_to(self.xa.head().as_node().unwrap());
 
         let (mut current_node, operation_offset) = self.state.node_info().unwrap();
-        let mut current_layer = current_node.layer();
         let mut operated_entry = current_node.ref_node_entry(operation_offset);
-        while current_layer > 0 {
+        while !current_node.is_leaf() {
             if !operated_entry.is_node() {
                 self.init();
                 return None;
             }
-
-            self.passed_node[*current_layer as usize] = Some(current_node);
+            self.ancestors.push(current_node);
 
             current_node = operated_entry.as_node().unwrap();
-            *current_layer -= 1;
             operated_entry = self.move_to(current_node);
         }
         Some(operated_entry)
@@ -224,7 +226,7 @@ impl<'a, I: ItemEntry, M: ValidMark> Cursor<'a, I, M> {
     /// Initialize the Cursor to its initial state.
     pub fn init(&mut self) {
         self.state = CursorState::default();
-        self.passed_node = [None; MAX_LAYER];
+        self.ancestors = SmallVec::new();
     }
 
     /// Return the target index of the cursor.
@@ -250,44 +252,49 @@ impl<'a, I: ItemEntry, M: ValidMark> Cursor<'a, I, M> {
 /// XEntry at the next index. If the `CursorMut` perform reset or next and then have a target index that is not able to touch,
 /// the `CursorMut`'s state will also set to `Inactive`.
 ///
-/// Hence, at any given moment a `CursorMut` will be positioned on the XNode and be ready to operate its target XEntry.
+/// Hence, at any given moment when no operation is being performed, a `CursorMut` will be positioned on the XNode and be ready to operate its target XEntry.
 /// If not, it means that the `CursorMut` is not able to touch the target `XEntry`. For this situation, the `CursorMut`
 /// can invoke `store` method which will expand the XArray to guarantee to reach the target position.
 ///
 /// The `CursorMut` will also record all nodes passed from the head node to the target position in passed_node,
 /// thereby assisting it in performing some operations that involve searching upwards.
 ///
-/// **Features for COW (Copy-On-Write).** The CursorMut guarantees that all nodes it traverses during the process are exclusively owned by the current XArray.
+/// **Features for COW (Copy-On-Write).** The CursorMut guarantees that if it is exclusive, all nodes it traverses during the process are exclusively owned by the current XArray.
 /// If it finds that the node it is about to access is shared with another XArray due to a COW clone, it will trigger a COW to copy and create an exclusive node for access.
 /// Additionally, since it holds a mutable reference to the current XArray, it will not conflict with any other cursors on the XArray.
+/// CursorMut is set to exclusive when a modification is about to be performed
 ///
-/// When a CursorMut doing operation on XArray, it should not be affected by other CursorMuts or affect other Cursors.
-pub struct CursorMut<'a, I, M>
+/// When a CursorMut doing write operation on XArray, it should not be affected by other CursorMuts or affect other Cursors.
+pub struct CursorMut<'a, I, L, M>
 where
     I: ItemEntry,
+    L: XLock,
     M: ValidMark,
 {
     /// The `XArray` the cursor located in.
-    xa: &'a mut XArray<I, M>,
+    xa: &'a mut XArray<I, L, M>,
     /// The target index of the cursor in the belonged `XArray`.
     index: u64,
     /// Represents the current state of the cursor.
-    state: CursorState<'a, I, ReadWrite>,
+    state: CursorState<'a, I, L, ReadWrite>,
     /// Record add nodes passed from the head node to the target position.
-    /// The index is the layer of the recorded node.
-    passed_node: [Option<&'a XNode<I, ReadWrite>>; MAX_LAYER],
+    /// The index is the height of the recorded node.
+    ancestors: SmallVec<[&'a XNode<I, L, ReadWrite>; MAX_HEIGHT]>,
+
+    is_exclusive: bool,
 
     _marker: PhantomData<I>,
 }
 
-impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
+impl<'a, I: ItemEntry, L: XLock, M: ValidMark> CursorMut<'a, I, L, M> {
     /// Create an `CursorMut` to perform read and write operations on the `XArray`.
-    pub(crate) fn new(xa: &'a mut XArray<I, M>, index: u64) -> Self {
+    pub(super) fn new(xa: &'a mut XArray<I, L, M>, index: u64) -> Self {
         let mut cursor = Self {
             xa,
             index,
             state: CursorState::default(),
-            passed_node: [None; MAX_LAYER],
+            ancestors: SmallVec::new(),
+            is_exclusive: false,
             _marker: PhantomData,
         };
 
@@ -297,10 +304,10 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
 
     /// Move the `CursorMut` to the `XNode`, and update the cursor's state based on its target index.
     /// Return a reference to the `XEntry` within the slots of the current XNode that needs to be operated on next.
-    fn move_to(&mut self, node: &'a XNode<I, ReadWrite>) -> &'a XEntry<I> {
+    fn move_to(&mut self, node: &'a XNode<I, L, ReadWrite>) -> &'a XEntry<I, L> {
         let (current_entry, offset) = {
             let offset = node.entry_offset(self.index);
-            let current_entry = node.ref_node_entry(offset);
+            let current_entry = node.ref_node_entry(self.is_exclusive, offset);
             (current_entry, offset)
         };
         self.state.arrive_node(node, offset);
@@ -311,6 +318,7 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
     pub fn reset_to(&mut self, index: u64) {
         self.init();
         self.index = index;
+        self.is_exclusive = false;
         self.traverse_to_target();
     }
 
@@ -320,6 +328,7 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
     /// the method returns the provided entry without making changes.
     /// Otherwise, it replaces the current entry with the provided one and returns the old entry.
     pub fn store(&mut self, item: I) -> Option<I> {
+        self.ensure_exclusive_before_modify();
         let stored_entry = XEntry::from_item(item);
         let target_entry = self.expand_and_traverse_to_target();
         if stored_entry.raw() == target_entry.raw() {
@@ -349,20 +358,25 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
         operation_offset += 1;
         while operation_offset == SLOT_SIZE as u8 {
             operation_offset = current_node.offset_in_parent() + 1;
-            let parent_layer = (*current_node.layer() + 1) as usize;
-            self.passed_node[parent_layer - 1] = None;
-            current_node = self.passed_node[parent_layer].unwrap();
+            if let Some(node) = self.ancestors.pop() {
+                current_node = node;
+                continue;
+            }
+
+            operation_offset = 0;
+            break;
         }
         self.state.arrive_node(current_node, operation_offset);
 
-        while current_node.layer() != 0 {
-            let next_entry = current_node.ref_node_entry(operation_offset);
+        while !current_node.is_leaf() {
+            let next_entry = current_node.ref_node_entry(self.is_exclusive, operation_offset);
             if !next_entry.is_node() {
                 self.init();
                 return;
             }
+
             let next_node = next_entry.as_node_mut().unwrap();
-            self.passed_node[*next_node.layer() as usize] = Some(next_node);
+            self.ancestors.push(current_node);
             self.move_to(next_node);
             (current_node, operation_offset) = self.state.node_info().unwrap();
         }
@@ -374,25 +388,22 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
     /// This operation will also mark all nodes along the path from the head node to the target node with the input `mark`,
     /// because a marked intermediate node should be equivalent to having a child node that is marked.
     pub fn set_mark(&mut self, mark: M) -> Result<(), ()> {
+        self.ensure_exclusive_before_modify();
         if let Some((current_node, operation_offset)) = self.state.node_info() {
-            let item_entry = current_node.ref_node_entry(operation_offset);
+            let item_entry = current_node.ref_node_entry(self.is_exclusive, operation_offset);
             if item_entry.is_null() {
                 return Err(());
             }
 
             current_node.set_mark(operation_offset, mark.index());
 
-            let head_layer = *(self.xa.head().as_node().unwrap().layer()) as usize;
             let mut offset_in_parent = current_node.offset_in_parent();
-            let mut parent_layer = (*current_node.layer() + 1) as usize;
-            while parent_layer <= head_layer {
-                let parent_node = self.passed_node[parent_layer].unwrap();
-                if parent_node.is_marked(offset_in_parent, mark.index()) {
+            for ancestor in self.ancestors.iter().rev() {
+                if ancestor.is_marked(offset_in_parent, mark.index()) {
                     break;
                 }
-                parent_node.set_mark(offset_in_parent, mark.index());
-                offset_in_parent = parent_node.offset_in_parent();
-                parent_layer += 1;
+                ancestor.set_mark(offset_in_parent, mark.index());
+                offset_in_parent = ancestor.offset_in_parent();
             }
             Ok(())
         } else {
@@ -406,25 +417,24 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
     /// This operation will also unset the input `mark` for all nodes along the path from the head node to the target node
     /// if the input `mark` have not marked any of their children.
     pub fn unset_mark(&mut self, mark: M) -> Result<(), ()> {
-        if let Some((mut current_node, operation_offset)) = self.state.node_info() {
-            let item_entry = current_node.ref_node_entry(operation_offset);
+        self.ensure_exclusive_before_modify();
+        if let Some((current_node, operation_offset)) = self.state.node_info() {
+            let item_entry = current_node.ref_node_entry(self.is_exclusive, operation_offset);
             if item_entry.is_null() {
                 return Err(());
             }
 
             current_node.unset_mark(operation_offset, mark.index());
 
-            let head_layer = *(self.xa.head().as_node().unwrap().layer()) as usize;
-            let mut parent_layer = (*current_node.layer() + 1) as usize;
-            while current_node.is_mark_clear(mark.index()) {
-                let offset_in_parent = current_node.offset_in_parent();
-                let parent_node = self.passed_node[parent_layer].unwrap();
-                parent_node.unset_mark(offset_in_parent, mark.index());
+            if current_node.is_mark_clear(mark.index()) {
+                let mut offset_in_parent = current_node.offset_in_parent();
+                for ancestor in self.ancestors.iter().rev() {
+                    ancestor.unset_mark(offset_in_parent, mark.index());
+                    if !ancestor.is_mark_clear(mark.index()) {
+                        break;
+                    }
 
-                current_node = parent_node;
-                parent_layer += 1;
-                if parent_layer > head_layer {
-                    break;
+                    offset_in_parent = ancestor.offset_in_parent();
                 }
             }
             Ok(())
@@ -438,6 +448,7 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
     /// This is achieved by storing an empty `XEntry` at the target index using the `store` method.
     /// The method returns the replaced `XEntry` that was previously stored at the target index.
     pub fn remove(&mut self) -> Option<I> {
+        self.ensure_exclusive_before_modify();
         if let Some((current_node, operation_offset)) = self.state.node_info() {
             let old_entry = current_node.set_entry(operation_offset, XEntry::EMPTY);
             return XEntry::into_item(old_entry);
@@ -449,32 +460,30 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
     /// It then returns the reference to the `XEntry` stored in the slot corresponding to the target index.
     /// A target operated XEntry must be an item entry.
     /// If can not touch the target entry, the function will return `None`.
-    fn traverse_to_target(&mut self) -> Option<&'a XEntry<I>> {
+    fn traverse_to_target(&mut self) -> Option<&'a XEntry<I, L>> {
         if self.is_arrived() {
             let (current_node, operation_offset) = self.state.node_info().unwrap();
-            return Some(current_node.ref_node_entry(operation_offset));
+            return Some(current_node.ref_node_entry(self.is_exclusive, operation_offset));
         }
 
         let max_index = self.xa.max_index();
         if max_index < self.index || max_index == 0 {
             return None;
         }
-        let head = self.xa.head_mut().as_node_mut().unwrap();
+        let head = self.xa.head_mut(self.is_exclusive).as_node_mut().unwrap();
         self.move_to(head);
 
         let (mut current_node, operation_offset) = self.state.node_info().unwrap();
-        let mut current_layer = current_node.layer();
-        let mut operated_entry = current_node.ref_node_entry(operation_offset);
-        while current_layer > 0 {
+        let mut operated_entry = current_node.ref_node_entry(self.is_exclusive, operation_offset);
+        while !current_node.is_leaf() {
             if !operated_entry.is_node() {
                 self.init();
                 return None;
             }
 
-            self.passed_node[*current_layer as usize] = Some(current_node);
+            self.ancestors.push(current_node);
 
             current_node = operated_entry.as_node_mut().unwrap();
-            *current_layer -= 1;
             operated_entry = self.move_to(current_node);
         }
         Some(operated_entry)
@@ -483,74 +492,74 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
     /// Traverse the XArray and move to the node that can operate the target entry.
     /// During the traverse, the cursor may modify the XArray to let itself be able to reach the target node.
     ///
-    /// Before traverse, the cursor will first expand the layer of `XArray` to make sure it have enough capacity.
+    /// Before traverse, the cursor will first expand the height of `XArray` to make sure it have enough capacity.
     /// During the traverse, the cursor will allocate new `XNode` and put it in the appropriate slot if needed.
     ///
     /// It then returns the reference to the `XEntry` stored in the slot corresponding to the target index.
     /// A target operated XEntry must be an item entry.
-    fn expand_and_traverse_to_target(&mut self) -> &'a XEntry<I> {
+    fn expand_and_traverse_to_target(&mut self) -> &'a XEntry<I, L> {
         if self.is_arrived() {
             let (current_node, operation_offset) = self.state.node_info().unwrap();
-            return current_node.ref_node_entry(operation_offset);
+            return current_node.ref_node_entry(self.is_exclusive, operation_offset);
         }
 
-        self.expand_layer();
-        let head_ref = self.xa.head_mut().as_node_mut().unwrap();
+        self.expand_height();
+        let head_ref = self.xa.head_mut(self.is_exclusive).as_node_mut().unwrap();
         self.move_to(head_ref);
 
         let (mut current_node, operation_offset) = self.state.node_info().unwrap();
-        let mut current_layer = current_node.layer();
-        let mut operated_entry = current_node.ref_node_entry(operation_offset);
-        while current_layer > 0 {
+        let mut operated_entry = current_node.ref_node_entry(self.is_exclusive, operation_offset);
+        while !current_node.is_leaf() {
+            let current_height = current_node.height();
+
             if !operated_entry.is_node() {
                 let new_entry = {
                     let (current_node, operation_offset) = self.state.node_info().unwrap();
                     let new_owned_entry =
-                        self.alloc_node(Layer::new(*current_layer - 1), operation_offset);
+                        self.alloc_node(Height::new(*current_height - 1), operation_offset);
                     let _ = current_node.set_entry(operation_offset, new_owned_entry);
-                    current_node.ref_node_entry(operation_offset)
+                    current_node.ref_node_entry(self.is_exclusive, operation_offset)
                 };
                 operated_entry = new_entry;
             }
 
-            self.passed_node[*current_layer as usize] = Some(current_node);
+            self.ancestors.push(current_node);
 
             current_node = operated_entry.as_node_mut().unwrap();
-            *current_layer -= 1;
             operated_entry = self.move_to(current_node);
         }
         operated_entry
     }
 
-    /// Increase the number of layers for XArray to expand its capacity, allowing it to accommodate the target index,
-    /// and returns the layer of the final head node.
+    /// Increase the number of heights for XArray to expand its capacity, allowing it to accommodate the target index,
+    /// and returns the height of the final head node.
     ///
-    /// If the head node of the XArray does not exist, allocate a new head node of appropriate layer directly.
+    /// If the head node of the XArray does not exist, allocate a new head node of appropriate height directly.
     /// Otherwise, if needed, repeatedly insert new nodes on top of the current head node to serve as the new head.
-    fn expand_layer(&mut self) -> Layer {
+    fn expand_height(&mut self) -> Height {
         if self.xa.head().is_null() {
-            let mut head_layer = Layer::new(0);
-            while self.index > head_layer.max_index() {
-                *head_layer += 1;
+            let mut head_height = Height::new(1);
+            while self.index > head_height.max_index() {
+                *head_height += 1;
             }
-            let head = self.alloc_node(head_layer, 0);
+            let head = self.alloc_node(head_height, 0);
             self.xa.set_head(head);
-            return head_layer;
+            return head_height;
         } else {
             loop {
-                let head_layer = {
-                    let head = self.xa.head().as_node().unwrap();
-                    head.layer()
+                let head_height = {
+                    let head = self.xa.head_mut(self.is_exclusive).as_node().unwrap();
+                    head.height()
                 };
 
-                if head_layer.max_index() > self.index {
-                    return head_layer;
+                if head_height.max_index() > self.index {
+                    return head_height;
                 }
 
-                let new_node_entry = self.alloc_node(Layer::new(*head_layer + 1), 0);
+                let new_node_entry = self.alloc_node(Height::new(*head_height + 1), 0);
                 let old_head_entry = self.xa.set_head(new_node_entry);
                 let old_head = old_head_entry.as_node_mut().unwrap();
-                let new_head = self.xa.head_mut().as_node_mut().unwrap();
+                let new_head = self.xa.head_mut(self.is_exclusive).as_node_mut().unwrap();
                 for i in 0..3 {
                     if !old_head.mark(i).is_clear() {
                         new_head.set_mark(0, i);
@@ -561,23 +570,33 @@ impl<'a, I: ItemEntry, M: ValidMark> CursorMut<'a, I, M> {
         }
     }
 
-    /// Allocate a new XNode with the specified layer and offset,
+    /// Allocate a new XNode with the specified height and offset,
     /// then generate a node entry from it and return it to the caller.
-    fn alloc_node(&mut self, layer: Layer, offset: u8) -> XEntry<I> {
-        XEntry::from_node(XNode::<I, ReadWrite>::new(layer, offset))
+    fn alloc_node(&mut self, height: Height, offset: u8) -> XEntry<I, L> {
+        XEntry::from_node(XNode::<I, L, ReadWrite>::new(height, offset))
+    }
+
+    fn ensure_exclusive_before_modify(&mut self) {
+        if !self.is_exclusive {
+            self.is_exclusive = true;
+            if self.is_arrived() {
+                self.init();
+                self.traverse_to_target();
+            }
+        }
     }
 }
 
-impl<'a, I: ItemEntry, M: ValidMark> Deref for CursorMut<'a, I, M> {
-    type Target = Cursor<'a, I, M>;
+impl<'a, I: ItemEntry, L: XLock, M: ValidMark> Deref for CursorMut<'a, I, L, M> {
+    type Target = Cursor<'a, I, L, M>;
 
     fn deref(&self) -> &Self::Target {
-        unsafe { &*(self as *const CursorMut<'a, I, M> as *const Cursor<'a, I, M>) }
+        unsafe { &*(self as *const CursorMut<'a, I, L, M> as *const Cursor<'a, I, L, M>) }
     }
 }
 
-impl<'a, I: ItemEntry, M: ValidMark> DerefMut for CursorMut<'a, I, M> {
+impl<'a, I: ItemEntry, L: XLock, M: ValidMark> DerefMut for CursorMut<'a, I, L, M> {
     fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { &mut *(self as *const CursorMut<'a, I, M> as *mut Cursor<'a, I, M>) }
+        unsafe { &mut *(self as *const CursorMut<'a, I, L, M> as *mut Cursor<'a, I, L, M>) }
     }
 }

+ 27 - 27
src/entry.rs

@@ -1,5 +1,7 @@
+use alloc::boxed::Box;
+use alloc::sync::Arc;
 use core::marker::PhantomData;
-use std::{mem::ManuallyDrop, sync::Arc};
+use core::mem::ManuallyDrop;
 
 use super::*;
 
@@ -60,15 +62,16 @@ unsafe impl<T> ItemEntry for Box<T> {
 /// will be transferred to the `XEntry`. If the stored item in the XArray implements the `Clone` trait, then the XEntry
 /// in the XArray can also implement the `Clone` trait.
 #[derive(Eq, Debug)]
-pub(crate) struct XEntry<I>
+pub(super) struct XEntry<I, L>
 where
     I: ItemEntry,
+    L: XLock,
 {
     raw: usize,
-    _marker: core::marker::PhantomData<I>,
+    _marker: core::marker::PhantomData<(I, L)>,
 }
 
-impl<I: ItemEntry> Drop for XEntry<I> {
+impl<I: ItemEntry, L: XLock> Drop for XEntry<I, L> {
     fn drop(&mut self) {
         if self.is_item() {
             unsafe {
@@ -77,13 +80,13 @@ impl<I: ItemEntry> Drop for XEntry<I> {
         }
         if self.is_node() {
             unsafe {
-                Arc::from_raw((self.raw - 2) as *const XNode<I>);
+                Arc::from_raw((self.raw - 2) as *const XNode<I, L>);
             }
         }
     }
 }
 
-impl<I: ItemEntry + Clone> Clone for XEntry<I> {
+impl<I: ItemEntry + Clone, L: XLock> Clone for XEntry<I, L> {
     fn clone(&self) -> Self {
         if self.is_item() {
             let cloned_entry = unsafe {
@@ -94,7 +97,7 @@ impl<I: ItemEntry + Clone> Clone for XEntry<I> {
         } else {
             if self.is_node() {
                 unsafe {
-                    Arc::increment_strong_count((self.raw - 2) as *const XNode<I>);
+                    Arc::increment_strong_count((self.raw - 2) as *const XNode<I, L>);
                 }
             }
             Self {
@@ -105,48 +108,48 @@ impl<I: ItemEntry + Clone> Clone for XEntry<I> {
     }
 }
 
-impl<I: ItemEntry> PartialEq for XEntry<I> {
+impl<I: ItemEntry, L: XLock> PartialEq for XEntry<I, L> {
     fn eq(&self, o: &Self) -> bool {
         self.raw == o.raw
     }
 }
 
-impl<I: ItemEntry> XEntry<I> {
-    pub(crate) fn raw(&self) -> usize {
+impl<I: ItemEntry, L: XLock> XEntry<I, L> {
+    pub fn raw(&self) -> usize {
         self.raw
     }
 
-    pub(crate) const EMPTY: Self = unsafe { Self::new(0) };
+    pub const EMPTY: Self = unsafe { Self::new(0) };
 
-    pub(crate) const unsafe fn new(raw: usize) -> Self {
+    pub const unsafe fn new(raw: usize) -> Self {
         Self {
             raw,
             _marker: PhantomData,
         }
     }
 
-    pub(crate) fn is_null(&self) -> bool {
+    pub fn is_null(&self) -> bool {
         self.raw == 0
     }
 
-    pub(crate) fn is_internal(&self) -> bool {
+    pub fn is_internal(&self) -> bool {
         self.raw & 3 == 2
     }
 
-    pub(crate) fn is_item(&self) -> bool {
+    pub fn is_item(&self) -> bool {
         !self.is_null() && !self.is_internal()
     }
 
-    pub(crate) fn is_node(&self) -> bool {
+    pub fn is_node(&self) -> bool {
         self.is_internal() && self.raw > (SLOT_SIZE << 2)
     }
 
-    pub(crate) fn from_item(item: I) -> Self {
+    pub fn from_item(item: I) -> Self {
         let raw = I::into_raw(item);
         unsafe { Self::new(raw as usize) }
     }
 
-    pub(crate) fn into_item(self) -> Option<I> {
+    pub fn into_item(self) -> Option<I> {
         if self.is_item() {
             let item = unsafe { I::from_raw(self.raw) };
             core::mem::forget(self);
@@ -156,7 +159,7 @@ impl<I: ItemEntry> XEntry<I> {
         }
     }
 
-    pub(crate) fn from_node<Operation>(node: XNode<I, Operation>) -> Self {
+    pub fn from_node<Operation>(node: XNode<I, L, Operation>) -> Self {
         let node_ptr = {
             let arc_node = Arc::new(node);
             Arc::into_raw(arc_node)
@@ -164,10 +167,10 @@ impl<I: ItemEntry> XEntry<I> {
         unsafe { Self::new(node_ptr as usize | 2) }
     }
 
-    pub(crate) fn as_node(&self) -> Option<&XNode<I>> {
+    pub fn as_node(&self) -> Option<&XNode<I, L>> {
         if self.is_node() {
             unsafe {
-                let node_ref = &*((self.raw - 2) as *const XNode<I>);
+                let node_ref = &*((self.raw - 2) as *const XNode<I, L>);
                 Some(node_ref)
             }
         } else {
@@ -175,10 +178,10 @@ impl<I: ItemEntry> XEntry<I> {
         }
     }
 
-    pub(crate) fn as_node_mut<'a>(&self) -> Option<&'a XNode<I, ReadWrite>> {
+    pub fn as_node_mut<'a>(&self) -> Option<&'a XNode<I, L, ReadWrite>> {
         if self.is_node() {
             unsafe {
-                let node_ref = &*((self.raw - 2) as *const XNode<I, ReadWrite>);
+                let node_ref = &*((self.raw - 2) as *const XNode<I, L, ReadWrite>);
                 Some(node_ref)
             }
         } else {
@@ -186,7 +189,7 @@ impl<I: ItemEntry> XEntry<I> {
         }
     }
 
-    pub(crate) fn node_strong_count(&self) -> Option<usize> {
+    pub fn node_strong_count(&self) -> Option<usize> {
         if self.is_node() {
             let raw_ptr = (self.raw - 2) as *const u8;
             unsafe {
@@ -200,6 +203,3 @@ impl<I: ItemEntry> XEntry<I> {
         }
     }
 }
-
-unsafe impl<I: ItemEntry + Sync> Sync for XEntry<I> {}
-unsafe impl<I: ItemEntry + Send> Send for XEntry<I> {}

+ 36 - 0
src/lib.rs

@@ -1,7 +1,12 @@
+#![no_std]
+#![allow(incomplete_features)]
 #![feature(pointer_is_aligned)]
 #![feature(specialization)]
 #![feature(associated_type_defaults)]
 
+extern crate alloc;
+extern crate smallvec;
+
 use cow::*;
 use cursor::*;
 use entry::*;
@@ -16,4 +21,35 @@ mod mark;
 mod node;
 mod xarray;
 
+#[cfg(all(test, feature = "std"))]
 mod test;
+
+#[cfg(feature = "std")]
+pub use std_specific::*;
+
+#[cfg(feature = "std")]
+mod std_specific {
+    extern crate std;
+
+    use crate::*;
+    use std::sync::{Mutex, MutexGuard};
+
+    impl<T> ValidLock<T> for Mutex<T> {
+        type Target<'a> = MutexGuard<'a, T>
+        where T: 'a;
+
+        fn new(inner: T) -> Self {
+            Mutex::new(inner)
+        }
+
+        fn lock(&self) -> Self::Target<'_> {
+            self.lock().unwrap()
+        }
+    }
+
+    pub struct StdMutex;
+
+    impl XLock for StdMutex {
+        type Lock<T> = Mutex<T>;
+    }
+}

+ 8 - 8
src/mark.rs

@@ -2,34 +2,34 @@
 /// A mark can be used to indicate which slots in an XNode contain items that have been marked.
 /// It internally stores a u64, functioning as a bitmap,
 /// where each bit that is set to 1 represents a slot at the corresponding offset that has been marked.
-pub(crate) struct Mark {
+pub(super) struct Mark {
     inner: u64,
 }
 
 impl Mark {
-    pub(crate) const EMPTY: Self = Self::new(0);
+    pub const EMPTY: Self = Self::new(0);
 
-    pub(crate) const fn new(inner: u64) -> Self {
+    pub const fn new(inner: u64) -> Self {
         Self { inner }
     }
 
-    pub(crate) fn set(&mut self, offset: u8) {
+    pub fn set(&mut self, offset: u8) {
         self.inner |= 1 << offset as u64;
     }
 
-    pub(crate) fn unset(&mut self, offset: u8) {
+    pub fn unset(&mut self, offset: u8) {
         self.inner &= !(1 << offset as u64);
     }
 
-    pub(crate) fn clear(&mut self) {
+    pub fn clear(&mut self) {
         self.inner = 0
     }
 
-    pub(crate) fn is_marked(&self, offset: u8) -> bool {
+    pub fn is_marked(&self, offset: u8) -> bool {
         (self.inner & 1 << offset as u64) != 0
     }
 
-    pub(crate) fn is_clear(&self) -> bool {
+    pub fn is_clear(&self) -> bool {
         self.inner == 0
     }
 }

+ 102 - 94
src/node.rs

@@ -1,195 +1,201 @@
 use core::cmp::Ordering;
-use std::{
+use core::{
     marker::PhantomData,
     ops::{Deref, DerefMut},
-    sync::Mutex,
 };
 
 use super::*;
 
-pub(crate) struct ReadOnly {}
-pub(crate) struct ReadWrite {}
+pub(super) struct ReadOnly {}
+pub(super) struct ReadWrite {}
 
-/// The layer of an XNode within an XArray.
+/// The height of an XNode within an XArray.
 ///
-/// In an XArray, the head has the highest layer, while the XNodes that directly store items are at the lowest layer,
-/// with a layer value of 0. Each level up from the bottom layer increases the layer number by 1.
-/// The layer of an XArray is the layer of its head.
+/// In an XArray, the head has the highest height, while the XNodes that directly store items are at the lowest height,
+/// with a height value of 1. Each level up from the bottom height increases the height number by 1.
+/// The height of an XArray is the height of its head.
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
-pub(crate) struct Layer {
-    layer: u8,
+pub(super) struct Height {
+    height: u8,
 }
 
-impl Deref for Layer {
+impl Deref for Height {
     type Target = u8;
 
     fn deref(&self) -> &Self::Target {
-        &self.layer
+        &self.height
     }
 }
 
-impl DerefMut for Layer {
+impl DerefMut for Height {
     fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.layer
+        &mut self.height
     }
 }
 
-impl PartialEq<u8> for Layer {
+impl PartialEq<u8> for Height {
     fn eq(&self, other: &u8) -> bool {
-        self.layer == *other
+        self.height == *other
     }
 }
 
-impl PartialOrd<u8> for Layer {
+impl PartialOrd<u8> for Height {
     fn partial_cmp(&self, other: &u8) -> Option<Ordering> {
-        self.layer.partial_cmp(other)
+        self.height.partial_cmp(other)
     }
 }
 
-impl Layer {
-    pub(crate) fn new(layer: u8) -> Self {
-        Self { layer }
+impl Height {
+    pub fn new(height: u8) -> Self {
+        Self { height }
     }
 
-    fn layer_shift(&self) -> u8 {
-        self.layer * BITS_PER_LAYER as u8
+    fn height_shift(&self) -> u8 {
+        (self.height - 1) * BITS_PER_LAYER as u8
     }
 
-    /// Calculate the corresponding offset for the target index at the current layer.
-    pub(crate) fn layer_offset(&self, index: u64) -> u8 {
-        ((index >> self.layer_shift()) & SLOT_MASK as u64) as u8
+    /// Calculate the corresponding offset for the target index at the current height.
+    pub fn height_offset(&self, index: u64) -> u8 {
+        ((index >> self.height_shift()) & SLOT_MASK as u64) as u8
     }
 
-    /// Calculate the maximum index that can be represented in XArray at the current layer.
-    pub(crate) fn max_index(&self) -> u64 {
-        ((SLOT_SIZE as u64) << self.layer_shift()) - 1
+    /// Calculate the maximum index that can be represented in XArray at the current height.
+    pub fn max_index(&self) -> u64 {
+        ((SLOT_SIZE as u64) << self.height_shift()) - 1
     }
 }
 
 /// `XNode` is the intermediate node in the tree-like structure of XArray.
 ///
 /// It contains `SLOT_SIZE` number of XEntries, meaning it can accommodate up to `SLOT_SIZE` child nodes.
-/// The 'layer' and 'offset_in_parent' attributes of an XNode are determined at initialization and remain unchanged thereafter.
+/// The 'height' and 'offset_in_parent' attributes of an XNode are determined at initialization and remain unchanged thereafter.
 ///
 /// XNode has a generic parameter called 'Operation', which has two possible instances: `ReadOnly` and `ReadWrite`.
 /// These instances indicate whether the XNode will only perform read operations or both read and write operations
 /// (where write operations imply potential modifications to the contents of slots).
-pub(crate) struct XNode<I: ItemEntry, Operation = ReadOnly> {
-    /// The node's layer from the bottom of the tree. The layer of a lead node,
-    /// which stores the user-given items, is 0.
-    layer: Layer,
+pub(super) struct XNode<I, L, Operation = ReadOnly>
+where
+    I: ItemEntry,
+    L: XLock,
+{
+    /// The height of the subtree rooted at the current node. The height of a leaf node,
+    /// which stores the user-given items, is 1.
+    height: Height,
     /// This node is its parent's `offset_in_parent`-th child.
     /// This field is meaningless if this node is the root (will be 0).
     offset_in_parent: u8,
-    inner: Mutex<XNodeInner<I>>,
+    inner: L::Lock<XNodeInner<I, L>>,
     _marker: PhantomData<Operation>,
 }
 
-pub(crate) struct XNodeInner<I: ItemEntry> {
-    slots: [XEntry<I>; SLOT_SIZE],
+struct XNodeInner<I, L>
+where
+    I: ItemEntry,
+    L: XLock,
+{
+    slots: [XEntry<I, L>; SLOT_SIZE],
     marks: [Mark; 3],
 }
 
-impl<I: ItemEntry, Operation> XNode<I, Operation> {
-    pub(crate) fn new(layer: Layer, offset: u8) -> Self {
+impl<I: ItemEntry, L: XLock, Operation> XNode<I, L, Operation> {
+    pub fn new(height: Height, offset: u8) -> Self {
         Self {
-            layer,
+            height,
             offset_in_parent: offset,
-            inner: Mutex::new(XNodeInner::new()),
+            inner: L::new(XNodeInner::new()),
             _marker: PhantomData,
         }
     }
 
     /// Get the offset in the slots of the current XNode corresponding to the XEntry for the target index.
-    pub(crate) fn entry_offset(&self, target_index: u64) -> u8 {
-        self.layer.layer_offset(target_index)
+    pub fn entry_offset(&self, target_index: u64) -> u8 {
+        self.height.height_offset(target_index)
     }
 
-    pub(crate) fn layer(&self) -> Layer {
-        self.layer
+    pub fn height(&self) -> Height {
+        self.height
     }
 
-    pub(crate) fn offset_in_parent(&self) -> u8 {
+    pub fn offset_in_parent(&self) -> u8 {
         self.offset_in_parent
     }
 
-    pub(crate) fn is_marked(&self, offset: u8, mark: usize) -> bool {
-        self.inner.lock().unwrap().is_marked(offset, mark)
+    pub fn is_marked(&self, offset: u8, mark: usize) -> bool {
+        self.inner.lock().is_marked(offset, mark)
     }
 
-    pub(crate) fn is_mark_clear(&self, mark: usize) -> bool {
-        self.inner.lock().unwrap().is_mark_clear(mark)
+    pub fn is_mark_clear(&self, mark: usize) -> bool {
+        self.inner.lock().is_mark_clear(mark)
     }
 
-    pub(crate) fn mark(&self, mark: usize) -> Mark {
-        self.inner.lock().unwrap().marks[mark]
+    pub fn mark(&self, mark: usize) -> Mark {
+        self.inner.lock().marks[mark]
     }
-}
 
-impl<I: ItemEntry> XNode<I, ReadOnly> {
-    fn entry<'a>(&'a self, offset: u8) -> *const XEntry<I> {
-        let lock = self.inner.lock().unwrap();
-        &lock.slots[offset as usize] as *const XEntry<I>
+    pub fn is_leaf(&self) -> bool {
+        self.height == 1
     }
+}
 
+impl<I: ItemEntry, L: XLock> XNode<I, L, ReadOnly> {
     /// Obtain a reference to the XEntry in the slots of the node. The input `offset` indicates
     /// the offset of the target XEntry in the slots.
-    pub(crate) fn ref_node_entry(&self, offset: u8) -> &XEntry<I> {
-        let target_entry_ptr = self.entry(offset);
+    pub fn ref_node_entry(&self, offset: u8) -> &XEntry<I, L> {
+        let lock = self.inner.lock();
+
+        let entry_ptr = &lock.slots[offset as usize] as *const XEntry<I, L>;
         // Safety: The returned entry has the same lifetime as the XNode that owns it.
         // Hence the position that `entry_ptr` points to will be valid during the usage of the returned reference.
-        unsafe { &*target_entry_ptr }
+        unsafe { &*entry_ptr }
     }
 }
 
-impl<I: ItemEntry> XNode<I, ReadWrite> {
-    fn entry<'a>(&'a self, offset: u8) -> *const XEntry<I> {
-        let mut lock = self.inner.lock().unwrap();
+impl<I: ItemEntry, L: XLock> XNode<I, L, ReadWrite> {
+    /// Obtain a reference to the XEntry in the slots of the node. The input `offset` indicates
+    /// the offset of the target XEntry in the slots.
+    pub fn ref_node_entry<'a>(&self, is_exclusive: bool, offset: u8) -> &XEntry<I, L> {
+        let mut lock = self.inner.lock();
 
         // When a modification to the target entry is needed, it first checks whether the entry is shared with other XArrays.
         // If it is, then it performs COW by allocating a new entry and using it,
         // to prevent the modification from affecting the read or write operations on other XArrays.
-        if let Some(new_entry) = self.copy_if_shared(&lock.slots[offset as usize]) {
-            lock.set_entry(offset, new_entry);
+        if is_exclusive {
+            if let Some(new_entry) = self.copy_if_shared(&lock.slots[offset as usize]) {
+                lock.set_entry(offset, new_entry);
+            }
         }
-        &lock.slots[offset as usize] as *const XEntry<I>
-    }
-
-    /// Obtain a reference to the XEntry in the slots of the node. The input `offset` indicate
-    /// the offset of target XEntry in the slots.
-    pub(crate) fn ref_node_entry<'a>(&self, offset: u8) -> &XEntry<I> {
-        let target_entry_ptr = self.entry(offset);
+        let entry_ptr = &lock.slots[offset as usize] as *const XEntry<I, L>;
         // Safety: The returned entry has the same lifetime as the XNode that owns it.
         // Hence the position that `entry_ptr` points to will be valid during the usage of the returned reference.
-        unsafe { &*target_entry_ptr }
+        unsafe { &*entry_ptr }
     }
 
-    pub(crate) fn set_entry(&self, offset: u8, entry: XEntry<I>) -> XEntry<I> {
-        self.inner.lock().unwrap().set_entry(offset, entry)
+    pub fn set_entry(&self, offset: u8, entry: XEntry<I, L>) -> XEntry<I, L> {
+        self.inner.lock().set_entry(offset, entry)
     }
 
-    pub(crate) fn set_mark(&self, offset: u8, mark: usize) {
-        self.inner.lock().unwrap().set_mark(offset, mark)
+    pub fn set_mark(&self, offset: u8, mark: usize) {
+        self.inner.lock().set_mark(offset, mark)
     }
 
-    pub(crate) fn unset_mark(&self, offset: u8, mark: usize) {
-        self.inner.lock().unwrap().unset_mark(offset, mark)
+    pub fn unset_mark(&self, offset: u8, mark: usize) {
+        self.inner.lock().unset_mark(offset, mark)
     }
 
-    pub(crate) fn clear_mark(&self, mark: usize) {
-        self.inner.lock().unwrap().clear_mark(mark)
+    pub fn clear_mark(&self, mark: usize) {
+        self.inner.lock().clear_mark(mark)
     }
 }
 
-impl<I: ItemEntry> XNodeInner<I> {
-    pub(crate) fn new() -> Self {
+impl<I: ItemEntry, L: XLock> XNodeInner<I, L> {
+    fn new() -> Self {
         Self {
             slots: [XEntry::EMPTY; SLOT_SIZE],
             marks: [Mark::EMPTY; 3],
         }
     }
 
-    pub(crate) fn set_entry(&mut self, offset: u8, entry: XEntry<I>) -> XEntry<I> {
+    fn set_entry(&mut self, offset: u8, entry: XEntry<I, L>) -> XEntry<I, L> {
         for i in 0..3 {
             self.marks[i].unset(offset);
         }
@@ -197,35 +203,37 @@ impl<I: ItemEntry> XNodeInner<I> {
         old_entry
     }
 
-    pub(crate) fn set_mark(&mut self, offset: u8, mark: usize) {
+    fn set_mark(&mut self, offset: u8, mark: usize) {
         self.marks[mark].set(offset);
     }
 
-    pub(crate) fn unset_mark(&mut self, offset: u8, mark: usize) {
+    fn unset_mark(&mut self, offset: u8, mark: usize) {
         self.marks[mark].unset(offset);
     }
 
-    pub(crate) fn is_marked(&self, offset: u8, mark: usize) -> bool {
+    fn is_marked(&self, offset: u8, mark: usize) -> bool {
         self.marks[mark].is_marked(offset)
     }
 
-    pub(crate) fn is_mark_clear(&self, mark: usize) -> bool {
+    fn is_mark_clear(&self, mark: usize) -> bool {
         self.marks[mark].is_clear()
     }
 
-    pub(crate) fn clear_mark(&mut self, mark: usize) {
+    fn clear_mark(&mut self, mark: usize) {
         self.marks[mark].clear();
     }
 }
 
-pub(crate) fn deep_clone_node_entry<I: ItemEntry + Clone>(entry: &XEntry<I>) -> XEntry<I> {
+pub(super) fn deep_clone_node_entry<I: ItemEntry + Clone, L: XLock>(
+    entry: &XEntry<I, L>,
+) -> XEntry<I, L> {
     debug_assert!(entry.is_node());
     let new_node = {
-        let cloned_node: &XNode<I> = entry.as_node().unwrap();
+        let cloned_node: &XNode<I, L> = entry.as_node().unwrap();
         let new_node =
-            XNode::<I, ReadWrite>::new(cloned_node.layer(), cloned_node.offset_in_parent());
-        let mut new_node_lock = new_node.inner.lock().unwrap();
-        let cloned_node_lock = cloned_node.inner.lock().unwrap();
+            XNode::<I, L, ReadWrite>::new(cloned_node.height(), cloned_node.offset_in_parent());
+        let mut new_node_lock = new_node.inner.lock();
+        let cloned_node_lock = cloned_node.inner.lock();
         new_node_lock.marks = cloned_node_lock.marks;
         for i in 0..SLOT_SIZE {
             let entry = &cloned_node_lock.slots[i];

+ 68 - 47
src/test.rs

@@ -1,55 +1,71 @@
-#[cfg(test)]
-use super::*;
-#[cfg(test)]
+extern crate std;
+use crate::*;
 use std::sync::Arc;
 
+#[derive(Clone, Copy)]
+enum MarkDemo {
+    Mark0,
+    Mark1,
+    Mark2,
+}
+
+impl ValidMark for MarkDemo {
+    fn index_raw(&self) -> usize {
+        match self {
+            Self::Mark0 => 0,
+            Self::Mark1 => 1,
+            Self::Mark2 => 2,
+        }
+    }
+}
+
 #[test]
-fn test_store() {
-    let mut xarray_arc: XArray<Arc<i32>> = XArray::new();
-    for i in 1..10000 {
+fn test_simple_store() {
+    let mut xarray_arc: XArray<Arc<i32>, StdMutex> = XArray::new();
+    for i in 0..10000 {
         let value = Arc::new(i * 2);
         xarray_arc.store((i * 3) as u64, value);
     }
-    for i in 1..10000 {
+    for i in 0..10000 {
         let value = xarray_arc.load((i * 3) as u64).unwrap();
         assert!(*value.as_ref() == i * 2)
     }
 }
 
+#[test]
+fn test_overwrite_store() {
+    let mut xarray_arc: XArray<Arc<i32>, StdMutex> = XArray::new();
+
+    let value = Arc::new(20);
+    xarray_arc.store(10, value);
+    let v = xarray_arc.load(10).unwrap();
+    assert!(*v.as_ref() == 20);
+
+    let value = Arc::new(40);
+    xarray_arc.store(10, value);
+    let v = xarray_arc.load(10).unwrap();
+    assert!(*v.as_ref() == 40);
+}
+
 #[test]
 fn test_remove() {
-    let mut xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let mut xarray_arc: XArray<Arc<i32>, StdMutex> = XArray::new();
+    assert!(xarray_arc.remove(66).is_none());
     for i in 0..10000 {
         let value = Arc::new(i * 2);
         xarray_arc.store(i as u64, value);
     }
     for i in 0..10000 {
-        xarray_arc.remove(i as u64);
+        assert!(xarray_arc.remove(i as u64).is_some());
         let value = xarray_arc.load(i as u64);
-        assert!(value == None)
+        assert!(value == None);
+        assert!(xarray_arc.remove(i as u64).is_none());
     }
 }
 
 #[test]
 fn test_mark() {
-    #[derive(Clone, Copy)]
-    enum MarkDemo {
-        Mark0,
-        Mark1,
-        Mark2,
-    }
-
-    impl ValidMark for MarkDemo {
-        fn index_raw(&self) -> usize {
-            match self {
-                Self::Mark0 => 0,
-                Self::Mark1 => 1,
-                Self::Mark2 => 2,
-            }
-        }
-    }
-
-    let mut xarray_arc: XArray<Arc<i32>, MarkDemo> = XArray::new();
+    let mut xarray_arc: XArray<Arc<i32>, StdMutex, MarkDemo> = XArray::new();
     for i in 1..10000 {
         let value = Arc::new(i * 2);
         xarray_arc.store(i as u64, value);
@@ -62,6 +78,7 @@ fn test_mark() {
     cursor.reset_to(20000);
     assert!(Err(()) == cursor.set_mark(MarkDemo::Mark1));
     assert!(None == cursor.load());
+    drop(cursor);
     let (value1, value1_mark0) = xarray_arc.load_with_mark(1000, MarkDemo::Mark0).unwrap();
     let (_, value1_mark1) = xarray_arc.load_with_mark(1000, MarkDemo::Mark1).unwrap();
     let (value2, value2_mark1) = xarray_arc.load_with_mark(2000, MarkDemo::Mark1).unwrap();
@@ -79,6 +96,7 @@ fn test_mark() {
     let mut cursor = xarray_arc.cursor_mut(1000);
     cursor.unset_mark(MarkDemo::Mark0).unwrap();
     cursor.unset_mark(MarkDemo::Mark2).unwrap();
+    drop(cursor);
     let (_, value1_mark0) = xarray_arc.load_with_mark(1000, MarkDemo::Mark0).unwrap();
     let (_, value1_mark2) = xarray_arc.load_with_mark(1000, MarkDemo::Mark2).unwrap();
     assert!(value1_mark0 == false);
@@ -112,7 +130,7 @@ fn test_cow() {
             Self { raw }
         }
     }
-    let mut xarray_arc: XArray<Arc<Wrapper>> = XArray::new();
+    let mut xarray_arc: XArray<Arc<Wrapper>, StdMutex> = XArray::new();
     for i in 1..10000 {
         let value = Arc::new(Wrapper::new(i * 2));
         xarray_arc.store(i as u64, value);
@@ -147,22 +165,7 @@ fn test_cow() {
 
 #[test]
 fn test_cow_mark() {
-    #[derive(Clone, Copy)]
-    enum MarkDemo {
-        Mark0,
-        Mark1,
-    }
-
-    impl ValidMark for MarkDemo {
-        fn index_raw(&self) -> usize {
-            match self {
-                Self::Mark0 => 0,
-                Self::Mark1 => 1,
-            }
-        }
-    }
-
-    let mut xarray_arc: XArray<Arc<i32>, MarkDemo> = XArray::new();
+    let mut xarray_arc: XArray<Arc<i32>, StdMutex, MarkDemo> = XArray::new();
     for i in 1..10000 {
         let value = Arc::new(i * 2);
         xarray_arc.store(i as u64, value);
@@ -177,7 +180,8 @@ fn test_cow_mark() {
     cursor_arc.set_mark(MarkDemo::Mark0).unwrap();
 
     cursor_clone.set_mark(MarkDemo::Mark1).unwrap();
-
+    drop(cursor_arc);
+    drop(cursor_clone);
     let (_, mark0_1000_arc) = xarray_arc.load_with_mark(1000, MarkDemo::Mark0).unwrap();
     let (_, mark0_2000_arc) = xarray_arc.load_with_mark(2000, MarkDemo::Mark0).unwrap();
     let (_, mark1_1000_arc) = xarray_arc.load_with_mark(1000, MarkDemo::Mark1).unwrap();
@@ -199,7 +203,7 @@ fn test_cow_mark() {
 
 #[test]
 fn test_next() {
-    let mut xarray_arc: XArray<Arc<i32>> = XArray::new();
+    let mut xarray_arc: XArray<Arc<i32>, StdMutex> = XArray::new();
     for i in 1..10000 {
         let value = Arc::new(i * 2);
         xarray_arc.store(i as u64, value);
@@ -215,8 +219,25 @@ fn test_next() {
         let value = Arc::new((10000 + i) * 2);
         cursor.store(value);
     }
+    drop(cursor);
     for i in 10000..20000 {
         let value = xarray_arc.load(i as u64).unwrap();
         assert!(*value.as_ref() == i * 2)
     }
 }
+
+#[test]
+fn test_range() {
+    let mut xarray_arc: XArray<Arc<i32>, StdMutex> = XArray::new();
+    for i in 0..10000 {
+        let value = Arc::new(i * 2);
+        xarray_arc.store((i * 2) as u64, value);
+    }
+
+    let mut count = 0;
+    for (index, item) in xarray_arc.range(1000..2000) {
+        assert!(*item.as_ref() as u64 == index);
+        count += 1;
+    }
+    assert!(count == 500);
+}

+ 90 - 25
src/xarray.rs

@@ -1,11 +1,15 @@
-use std::{collections::VecDeque, marker::PhantomData};
+use alloc::collections::VecDeque;
+use core::{
+    marker::PhantomData,
+    ops::{Deref, DerefMut},
+};
 
 use super::*;
 
-pub(crate) const BITS_PER_LAYER: usize = 6;
-pub(crate) const SLOT_SIZE: usize = 1 << BITS_PER_LAYER;
-pub(crate) const SLOT_MASK: usize = SLOT_SIZE - 1;
-pub(crate) const MAX_LAYER: usize = 64 / BITS_PER_LAYER + 1;
+pub(super) const BITS_PER_LAYER: usize = 6;
+pub(super) const SLOT_SIZE: usize = 1 << BITS_PER_LAYER;
+pub(super) const SLOT_MASK: usize = SLOT_SIZE - 1;
+pub(super) const MAX_HEIGHT: usize = 64 / BITS_PER_LAYER + 1;
 
 /// `XArray` is an abstract data type functioning like an expansive array of items where each item must be an 8-byte object, such as `Arc<T>` or `Box<T>`.
 /// User-stored pointers must have a minimum alignment of 4 bytes. `XArray` facilitates efficient sequential access to adjacent entries,
@@ -31,9 +35,10 @@ pub(crate) const MAX_LAYER: usize = 64 / BITS_PER_LAYER + 1;
 ///
 /// ```
 /// use std::sync::Arc;
+/// use std::sync::{Mutex, MutexGuard};
 /// use xarray::*;
 ///
-/// let mut xarray_arc: XArray<Arc<i32>> = XArray::new();
+/// let mut xarray_arc: XArray<Arc<i32>, StdMutex> = XArray::new();
 /// let value = Arc::new(10);
 /// xarray_arc.store(333, value);
 /// assert!(*xarray_arc.load(333).unwrap().as_ref() == 10);
@@ -49,17 +54,17 @@ pub(crate) const MAX_LAYER: usize = 64 / BITS_PER_LAYER + 1;
 ///
 /// The concept of XArray was originally introduced by Linux, which keeps the data structure of Linux's radix tree
 /// [Linux Radix Trees](https://lwn.net/Articles/175432/).
-pub struct XArray<I, M = NoneMark>
+pub struct XArray<I, L: XLock, M = NoneMark>
 where
     I: ItemEntry,
     M: ValidMark,
 {
     marks: [bool; 3],
-    head: XEntry<I>,
+    head: XEntry<I, L>,
     _marker: PhantomData<(I, M)>,
 }
 
-impl<I: ItemEntry, M: ValidMark> XArray<I, M> {
+impl<I: ItemEntry, L: XLock, M: ValidMark> XArray<I, L, M> {
     /// Make a new, empty XArray.
     pub const fn new() -> Self {
         Self {
@@ -85,31 +90,33 @@ impl<I: ItemEntry, M: ValidMark> XArray<I, M> {
     }
 
     /// Return a reference to the head entry, and later will not modify the XNode pointed to by the `head`.
-    pub(crate) fn head(&self) -> &XEntry<I> {
+    pub(super) fn head(&self) -> &XEntry<I, L> {
         &self.head
     }
 
-    /// Return a reference to the head entry, and later will modify the XNode pointed to by the `head`.
-    pub(crate) fn head_mut(&mut self) -> &XEntry<I> {
-        // When a modification to the head is needed, it first checks whether the head is shared with other XArrays.
-        // If it is, then it performs COW by allocating a new head and using it,
-        // to prevent the modification from affecting the read or write operations on other XArrays.
-        if let Some(new_head) = self.copy_if_shared(&self.head) {
-            self.set_head(new_head);
+    /// Return a reference to the head entry, and later may modify the XNode pointed to by the `head`.
+    pub(super) fn head_mut(&mut self, is_exclusive: bool) -> &XEntry<I, L> {
+        if is_exclusive {
+            // When a modification to the head is needed, it first checks whether the head is shared with other XArrays.
+            // If it is, then it performs COW by allocating a new head and using it,
+            // to prevent the modification from affecting the read or write operations on other XArrays.
+            if let Some(new_head) = self.copy_if_shared(&self.head) {
+                self.set_head(new_head);
+            }
         }
         &self.head
     }
 
-    pub(crate) fn max_index(&self) -> u64 {
+    pub(super) fn max_index(&self) -> u64 {
         if let Some(node) = self.head.as_node() {
-            node.layer().max_index()
+            node.height().max_index()
         } else {
             0
         }
     }
 
     /// Set the head of the `XArray` with the new `XEntry`, and return the old `head`.
-    pub(crate) fn set_head(&mut self, head: XEntry<I>) -> XEntry<I> {
+    pub(super) fn set_head(&mut self, head: XEntry<I, L>) -> XEntry<I, L> {
         let old_head = core::mem::replace(&mut self.head, head);
         old_head
     }
@@ -153,7 +160,7 @@ impl<I: ItemEntry, M: ValidMark> XArray<I, M> {
     /// Unset the input `mark` for all of the items in the `XArray`.
     pub fn unset_mark_all(&mut self, mark: M) {
         let mut handle_list = VecDeque::new();
-        if let Some(node) = self.head_mut().as_node_mut() {
+        if let Some(node) = self.head_mut(true).as_node_mut() {
             handle_list.push_back(node);
         }
         while !handle_list.is_empty() {
@@ -162,7 +169,7 @@ impl<I: ItemEntry, M: ValidMark> XArray<I, M> {
             let node_mark = node.mark(mark.index());
             while (offset as usize) < SLOT_SIZE {
                 if node_mark.is_marked(offset) {
-                    let entry = node.ref_node_entry(offset);
+                    let entry = node.ref_node_entry(true, offset);
                     if let Some(node) = entry.as_node_mut() {
                         handle_list.push_back(node);
                     }
@@ -181,17 +188,25 @@ impl<I: ItemEntry, M: ValidMark> XArray<I, M> {
     }
 
     /// Create an `Cursor` to perform read related operations on the `XArray`.
-    pub fn cursor<'a>(&'a self, index: u64) -> Cursor<'a, I, M> {
+    pub fn cursor<'a>(&'a self, index: u64) -> Cursor<'a, I, L, M> {
         Cursor::new(self, index)
     }
 
     /// Create an `CursorMut` to perform read and write operations on the `XArray`.
-    pub fn cursor_mut<'a>(&'a mut self, index: u64) -> CursorMut<'a, I, M> {
+    pub fn cursor_mut<'a>(&'a mut self, index: u64) -> CursorMut<'a, I, L, M> {
         CursorMut::new(self, index)
     }
+
+    pub fn range<'a>(&'a self, range: core::ops::Range<u64>) -> Range<'a, I, L, M> {
+        let cursor = Cursor::new(self, range.start);
+        Range {
+            cursor,
+            end: range.end,
+        }
+    }
 }
 
-impl<I: ItemEntry + Clone, M: ValidMark> Clone for XArray<I, M> {
+impl<I: ItemEntry + Clone, L: XLock, M: ValidMark> Clone for XArray<I, L, M> {
     /// Clone with cow mechanism.
     fn clone(&self) -> Self {
         let cloned_head = self.head.clone();
@@ -202,3 +217,53 @@ impl<I: ItemEntry + Clone, M: ValidMark> Clone for XArray<I, M> {
         }
     }
 }
+
+pub trait ValidLock<T>: Sized {
+    type Target<'a>: Deref<Target = T> + DerefMut<Target = T>
+    where
+        Self: 'a;
+
+    fn new(inner: T) -> Self;
+
+    fn lock(&self) -> Self::Target<'_>;
+}
+
+pub trait XLock {
+    type Lock<T>: ValidLock<T>;
+
+    fn new<T>(inner: T) -> Self::Lock<T> {
+        Self::Lock::<T>::new(inner)
+    }
+}
+
+pub struct Range<'a, I, L, M>
+where
+    I: ItemEntry,
+    L: XLock,
+    M: ValidMark,
+{
+    cursor: Cursor<'a, I, L, M>,
+    end: u64,
+}
+
+impl<'a, I: ItemEntry, L: XLock, M: ValidMark> core::iter::Iterator for Range<'a, I, L, M> {
+    type Item = (u64, &'a I);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if self.cursor.index() >= self.end {
+                return None;
+            }
+
+            let item = self.cursor.load();
+            if item.is_none() {
+                self.cursor.next();
+                continue;
+            }
+
+            let res = item.map(|item| (self.cursor.index(), item));
+            self.cursor.next();
+            return res;
+        }
+    }
+}