node.rs

use core::cmp::Ordering;
use std::{
    marker::PhantomData,
    ops::{Deref, DerefMut},
    sync::{Arc, Mutex, Weak},
};

use crate::*;
/// A marker type indicating that only read operations will be performed on an `XNode`.
pub(crate) struct ReadOnly {}
/// A marker type indicating that both read and write operations will be performed on an `XNode`.
pub(crate) struct ReadWrite {}
/// The layer of an XNode within an XArray.
///
/// In an XArray, the head has the highest layer, while the XNodes that directly store items are at
/// the lowest layer, with a layer value of 0. Each level up from the bottom layer increases the
/// layer number by 1. The layer of an XArray is the layer of its head.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub(crate) struct Layer {
    layer: u8,
}

impl Deref for Layer {
    type Target = u8;

    fn deref(&self) -> &Self::Target {
        &self.layer
    }
}

impl DerefMut for Layer {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.layer
    }
}

impl PartialEq<u8> for Layer {
    fn eq(&self, other: &u8) -> bool {
        self.layer == *other
    }
}

impl PartialOrd<u8> for Layer {
    fn partial_cmp(&self, other: &u8) -> Option<Ordering> {
        self.layer.partial_cmp(other)
    }
}
impl Layer {
    pub(crate) fn new(layer: u8) -> Self {
        Self { layer }
    }

    /// The number of bits an index must be shifted right so that its low bits
    /// select a slot at this layer.
    fn layer_shift(&self) -> u8 {
        self.layer * BITS_PER_LAYER as u8
    }

    /// Calculate the corresponding offset for the target index at the current layer.
    pub(crate) fn layer_offset(&self, index: u64) -> u8 {
        ((index >> self.layer_shift()) & SLOT_MASK as u64) as u8
    }

    /// Calculate the maximum index that can be represented by an XArray whose head node is at the
    /// current layer.
    pub(crate) fn max_index(&self) -> u64 {
        ((SLOT_SIZE as u64) << self.layer_shift()) - 1
    }
}
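
// A worked example of the layer arithmetic above, assuming the usual 64-slot
// configuration (SLOT_SIZE = 64, BITS_PER_LAYER = 6, SLOT_MASK = 63); the actual
// constants come from `crate::*` and may differ.
//
//     let leaf = Layer::new(0);
//     let mid = Layer::new(1);
//     // For index 66 = 0b1_000010:
//     assert_eq!(leaf.layer_offset(66), 2); // 66 & 63
//     assert_eq!(mid.layer_offset(66), 1); // (66 >> 6) & 63
//     // A single leaf covers 64 items; a layer-1 head covers 64 * 64 items.
//     assert_eq!(leaf.max_index(), 63);
//     assert_eq!(mid.max_index(), 4095);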
/// `XNode` is the intermediate node in the tree-like structure of XArray.
///
/// It contains `SLOT_SIZE` number of XEntries, meaning it can accommodate up to `SLOT_SIZE` child
/// nodes. The `layer` and `offset_in_parent` attributes of an XNode are determined at
/// initialization and remain unchanged thereafter.
///
/// XNode has a generic parameter called `Operation`, which has two possible instances: `ReadOnly`
/// and `ReadWrite`. These instances indicate whether the XNode will only perform read operations
/// or both read and write operations (where write operations imply potential modifications to the
/// contents of slots).
pub(crate) struct XNode<I: ItemEntry, Operation = ReadOnly> {
    /// The node's layer from the bottom of the tree. The layer of a leaf node,
    /// which stores the user-given items, is 0.
    layer: Layer,
    /// This node is its parent's `offset_in_parent`-th child.
    /// This field is meaningless (and set to 0) if this node is the root.
    offset_in_parent: u8,
    inner: Mutex<XNodeInner<I>>,
    _marker: PhantomData<Operation>,
}
pub(crate) struct XNodeInner<I: ItemEntry> {
    /// A weak reference to the parent node, which avoids reference cycles between
    /// parents and children. `None` if this node is the root.
    parent: Option<Weak<XNode<I, ReadWrite>>>,
    /// The slots that store either child nodes or user-given items.
    slots: [XEntry<I>; SLOT_SIZE],
    /// One mark bitmap per mark kind, each tracking which slots carry that mark.
    marks: [Mark; 3],
}
impl<I: ItemEntry, Operation> XNode<I, Operation> {
    pub(crate) fn new(layer: Layer, offset: u8, parent: Option<Weak<XNode<I, ReadWrite>>>) -> Self {
        Self {
            layer,
            offset_in_parent: offset,
            inner: Mutex::new(XNodeInner::new(parent)),
            _marker: PhantomData,
        }
    }

    /// Get the offset in the slots of the current XNode corresponding to the XEntry for the
    /// target index.
    pub(crate) fn entry_offset(&self, target_index: u64) -> u8 {
        self.layer.layer_offset(target_index)
    }

    pub(crate) fn layer(&self) -> Layer {
        self.layer
    }

    pub(crate) fn offset_in_parent(&self) -> u8 {
        self.offset_in_parent
    }

    pub(crate) fn is_marked(&self, offset: u8, mark: usize) -> bool {
        self.inner.lock().unwrap().is_marked(offset, mark)
    }

    pub(crate) fn is_mark_clear(&self, mark: usize) -> bool {
        self.inner.lock().unwrap().is_mark_clear(mark)
    }

    pub(crate) fn mark(&self, mark: usize) -> Mark {
        self.inner.lock().unwrap().marks[mark]
    }
}
impl<I: ItemEntry> XNode<I, ReadOnly> {
    pub(crate) fn parent(&self) -> Option<&XNode<I, ReadOnly>> {
        self.inner
            .lock()
            .unwrap()
            .parent
            .as_ref()
            // SAFETY: a `ReadWrite` node can be reinterpreted as a `ReadOnly` one,
            // since the two types differ only in the `PhantomData` marker.
            .map(|parent| unsafe { &*(parent.as_ptr() as *const XNode<I, ReadOnly>) })
    }

    pub(crate) fn entry(&self, offset: u8) -> *const XEntry<I> {
        self.inner.lock().unwrap().entry(offset)
    }
}
impl<I: ItemEntry> XNode<I, ReadWrite> {
    pub(crate) fn parent(&self) -> Option<&XNode<I, ReadWrite>> {
        self.inner
            .lock()
            .unwrap()
            .parent
            .as_ref()
            .map(|parent| unsafe { &*parent.as_ptr() })
    }

    pub(crate) fn entry(&self, offset: u8) -> *const XEntry<I> {
        self.inner.lock().unwrap().entry_mut(offset)
    }

    pub(crate) fn set_parent(&self, parent: &XNode<I, ReadWrite>) {
        let parent = {
            // Construct a `Weak` reference from the borrowed parent without taking
            // ownership: temporarily rebuild the `Arc`, downgrade it, and forget it
            // so that the strong reference count is left unchanged.
            let arc = unsafe { Arc::from_raw(parent as *const XNode<I, ReadWrite>) };
            let weak = Arc::downgrade(&arc);
            core::mem::forget(arc);
            weak
        };
        self.inner.lock().unwrap().parent = Some(parent);
    }

    pub(crate) fn set_entry(&self, offset: u8, entry: XEntry<I>) -> XEntry<I> {
        self.inner.lock().unwrap().set_entry(offset, entry)
    }

    pub(crate) fn set_mark(&self, offset: u8, mark: usize) {
        self.inner.lock().unwrap().set_mark(offset, mark)
    }

    pub(crate) fn unset_mark(&self, offset: u8, mark: usize) {
        self.inner.lock().unwrap().unset_mark(offset, mark)
    }

    pub(crate) fn clear_mark(&self, mark: usize) {
        self.inner.lock().unwrap().clear_mark(mark)
    }
}
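
// An illustrative sketch (not part of this file) of how the `Operation` type-state
// splits the API: both views expose the read paths, but only the `ReadWrite` view
// can mutate slots or marks, so misuse is rejected at compile time. The caller
// functions below are hypothetical.
//
//     fn inspect<I: ItemEntry>(node: &XNode<I, ReadOnly>) {
//         let _entry: *const XEntry<I> = node.entry(0); // read path, no COW
//         // node.set_mark(0, 0); // would not compile: no such method on `ReadOnly`
//     }
//
//     fn update<I: ItemEntry>(node: &XNode<I, ReadWrite>, entry: XEntry<I>) {
//         let _old = node.set_entry(0, entry); // replaces the slot, returns the old entry
//         node.set_mark(0, 0);
//     }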
impl<I: ItemEntry> XNodeInner<I> {
    pub(crate) fn new(parent: Option<Weak<XNode<I, ReadWrite>>>) -> Self {
        Self {
            parent,
            slots: [XEntry::EMPTY; SLOT_SIZE],
            marks: [Mark::EMPTY; 3],
        }
    }

    pub(crate) fn entry(&self, offset: u8) -> *const XEntry<I> {
        &self.slots[offset as usize] as *const XEntry<I>
    }

    pub(crate) fn entry_mut(&mut self, offset: u8) -> *const XEntry<I> {
        // Before the target entry can be modified, check whether it is shared with other
        // XArrays. If it is, perform copy-on-write by allocating a new entry and using it
        // instead, so that the modification cannot affect read or write operations on the
        // other XArrays.
        if let Some(new_entry) = self.copy_if_shared(&self.slots[offset as usize]) {
            self.set_entry(offset, new_entry);
        }
        &self.slots[offset as usize] as *const XEntry<I>
    }

    pub(crate) fn set_entry(&mut self, offset: u8, entry: XEntry<I>) -> XEntry<I> {
        core::mem::replace(&mut self.slots[offset as usize], entry)
    }

    pub(crate) fn set_mark(&mut self, offset: u8, mark: usize) {
        self.marks[mark].set(offset);
    }

    pub(crate) fn unset_mark(&mut self, offset: u8, mark: usize) {
        self.marks[mark].unset(offset);
    }

    pub(crate) fn is_marked(&self, offset: u8, mark: usize) -> bool {
        self.marks[mark].is_marked(offset)
    }

    pub(crate) fn is_mark_clear(&self, mark: usize) -> bool {
        self.marks[mark].is_clear()
    }

    pub(crate) fn clear_mark(&mut self, mark: usize) {
        self.marks[mark].clear();
    }
}
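
// A minimal usage sketch for the mark operations above, assuming `Mark` is a
// SLOT_SIZE-wide bitmap as defined elsewhere in the crate. `MyItem` stands in for
// a hypothetical `ItemEntry` implementor.
//
//     let mut inner = XNodeInner::<MyItem>::new(None);
//     inner.set_mark(3, 0); // mark slot 3 with mark kind 0
//     assert!(inner.is_marked(3, 0));
//     assert!(!inner.is_mark_clear(0));
//     inner.clear_mark(0); // wipe every slot's mark of kind 0
//     assert!(inner.is_mark_clear(0));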
/// Make a deep copy of an `XEntry` that holds an `XNode`, cloning the node itself along with all
/// of its slots and marks.
pub(crate) fn deep_clone_node_entry<I: ItemEntry + Clone>(entry: &XEntry<I>) -> XEntry<I> {
    debug_assert!(entry.is_node());
    let new_node = {
        let cloned_node: &XNode<I> = entry.as_node().unwrap();
        let new_node = XNode::<I, ReadWrite>::new(
            cloned_node.layer(),
            cloned_node.offset_in_parent(),
            cloned_node.inner.lock().unwrap().parent.clone(),
        );
        let mut new_node_lock = new_node.inner.lock().unwrap();
        let cloned_node_lock = cloned_node.inner.lock().unwrap();
        new_node_lock.marks = cloned_node_lock.marks;
        for i in 0..SLOT_SIZE {
            new_node_lock.slots[i] = cloned_node_lock.slots[i].clone();
        }
        drop(new_node_lock);
        new_node
    };
    XEntry::from_node(new_node)
}
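
// A sketch of how the deep clone composes with the copy-on-write path in
// `entry_mut` above. The shape of `copy_if_shared` shown here is an assumption;
// the real implementation lives elsewhere in the crate, and `is_shared` is a
// hypothetical helper standing in for its sharing check.
//
//     fn copy_if_shared<I: ItemEntry + Clone>(entry: &XEntry<I>) -> Option<XEntry<I>> {
//         if entry.is_node() && is_shared(entry) {
//             // The entry's node is reachable from more than one XArray:
//             // clone it wholesale so this XArray gets a private copy to mutate.
//             Some(deep_clone_node_entry(entry))
//         } else {
//             None
//         }
//     }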