Jelajahi Sumber

chore: adopt Rust 1.57.0 as MSRV (#38)

This branch makes Rust 1.57.0 `thingbuf`'s official minimum supported
Rust version. Unfortunately, it was necessary to feature-flag the
`StaticChannel` and `StaticThingBuf` APIs, as they only compile on Rust
1.60.0. This is because the type definitions of the static APIs include
both a const generic _and_ a default type parameter, which is only
possible on 1.60+.

Signed-off-by: Eliza Weisman <eliza@buoyant.io>
Eliza Weisman 3 tahun lalu
induk
melakukan
9e2f543297
8 mengubah file dengan 690 tambahan dan 643 penghapusan
  1. 21 3
      .github/workflows/tests.yml
  2. 2 0
      Cargo.toml
  3. 24 1
      README.md
  4. 14 12
      src/lib.rs
  5. 400 405
      src/mpsc/async_impl.rs
  6. 227 222
      src/mpsc/sync.rs
  7. 1 0
      src/static_thingbuf.rs
  8. 1 0
      tests/static_storage.rs

+ 21 - 3
.github/workflows/tests.yml

@@ -13,22 +13,40 @@ on:
       - '**/Cargo.toml'
       - '.github/workflows/tests.yml'
 
+env:
+  RUSTFLAGS: -Dwarnings
+  RUST_BACKTRACE: 1
+  msrv: 1.57.0
+
 name: Tests
 jobs:
 
   tests:
     name: Tests
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        rust:
+          - stable
+          - nightly
+          - 1.57.0
     steps:
       - uses: actions/checkout@v2
-      - name: Install stable toolchain
+      - name: Install toolchain
         uses: actions-rs/toolchain@v1
         with:
           profile: minimal
-          toolchain: stable
+          toolchain: ${{ matrix.rust }}
           override: true
           components: rustfmt
-      - name: Run cargo test
+      - name: Run cargo test (with static APIs)
+        if: ${{ matrix.rust != env.msrv }}
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --features static
+      - name: Run cargo test (no static APIs)
+        if: ${{ matrix.rust == env.msrv }}
         uses: actions-rs/cargo@v1
         with:
           command: test

+ 2 - 0
Cargo.toml

@@ -8,6 +8,7 @@ members = [
 name = "thingbuf"
 version = "0.1.0"
 edition = "2021"
+rust-version = "1.57.0"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
@@ -15,6 +16,7 @@ edition = "2021"
 std = ["alloc", "parking_lot"]
 alloc = []
 default = ["std"]
+static = []
 
 [dependencies]
 pin-project = "1"

+ 24 - 1
README.md

@@ -109,7 +109,8 @@ thingbuf = { version = "0.1", default-features = false }
 With the `std` feature disabled, `thingbuf` will depend only on `libcore`. This
 means that APIs that require dynamic memory allocation will not be enabled.
 Statically allocated [channels][static-mpsc] and [queues][static-queue] are
-available for code without a memory allocator.
+available for code without a memory allocator, if the `static` feature flag is
+enabled.
 
 However, if a memory allocator _is_ available, `#![no_std]` code can also enable
 the `alloc` feature flag to depend on `liballoc`:
@@ -119,6 +120,28 @@ the `alloc` feature flag to depend on `liballoc`:
 thingbuf = { version = "0.1", default-features = false, features = ["alloc"] }
 ```
 
+### Crate Feature Flags
+
+- **std**: Enables features that require the Rust standard library, such as
+  synchronous (blocking) channels. This implicitly enables the "alloc" feature
+  flag. _Enabled by default_.
+- **alloc**: Enables features that require `liballoc` (but not `libstd`). This
+  enables `thingbuf` queues and asynchronous channels where the size of the
+  channel is determined at runtime.
+- **static**: Enables the static (const-generic-based) `thingbuf` queues and
+  channels. These can be used without dynamic memory allocation when the size of
+  a queue or channel is known at compile-time. _Disabled by default (requires
+  Rust 1.60 or newer)_.
+
+### Compiler Support
+
+`thingbuf` is built against the latest stable release. The minimum supported
+version is Rust 1.57. The current `thingbuf` version is not guaranteed to build on Rust
+versions earlier than the minimum supported version.
+
+Some feature flags may require newer Rust releases. For example, the "static"
+feature flag requires Rust 1.60+.
+
 ## FAQs
 
 - **Q: Why did you make this?**

+ 14 - 12
src/lib.rs

@@ -19,7 +19,7 @@ pub mod mpsc_perf_comparison {
 }
 
 feature! {
-    #![not(all(loom, test))]
+    #![all(feature = "static", not(all(loom, test)))]
     mod static_thingbuf;
     pub use self::static_thingbuf::StaticThingBuf;
 }
@@ -471,19 +471,21 @@ impl<T> Slot<T> {
         (0..capacity).map(|i| Slot::new(i)).collect()
     }
 
-    #[cfg(not(all(loom, test)))]
-    const EMPTY: Self = Self::new(usize::MAX);
+    feature! {
+        #![all(feature = "static", not(all(loom, test)))]
 
-    #[cfg(not(all(loom, test)))]
-    pub(crate) const fn make_static_array<const CAPACITY: usize>() -> [Self; CAPACITY] {
-        let mut array = [Self::EMPTY; CAPACITY];
-        let mut i = 0;
-        while i < CAPACITY {
-            array[i] = Self::new(i);
-            i += 1;
-        }
+        const EMPTY: Self = Self::new(usize::MAX);
 
-        array
+        pub(crate) const fn make_static_array<const CAPACITY: usize>() -> [Self; CAPACITY] {
+            let mut array = [Self::EMPTY; CAPACITY];
+            let mut i = 0;
+            while i < CAPACITY {
+                array[i] = Self::new(i);
+                i += 1;
+            }
+
+            array
+        }
     }
 
     #[cfg(not(all(loom, test)))]

+ 400 - 405
src/mpsc/async_impl.rs

@@ -1,6 +1,6 @@
 use super::*;
 use crate::{
-    loom::atomic::{self, AtomicBool, Ordering},
+    loom::atomic::{self, Ordering},
     recycling::{self, Recycle},
     wait::queue,
     Ref,
@@ -41,7 +41,6 @@ feature! {
     }
 
     #[derive(Debug)]
-
     pub struct Receiver<T, R = recycling::DefaultRecycle> {
         inner: Arc<Inner<T, R>>,
     }
@@ -56,114 +55,163 @@ feature! {
         slots: Box<[Slot<T>]>,
         recycle: R,
     }
-}
 
-/// A statically-allocated, asynchronous bounded MPSC channel.
-///
-/// A statically-allocated channel allows using a MPSC channel without
-/// requiring _any_ heap allocations, and can be used in environments that
-/// don't support `liballoc`.
-///
-/// In order to use a statically-allocated channel, a `StaticChannel` must
-/// be constructed in a `static` initializer. This reserves storage for the
-/// channel's message queue at compile-time. Then, at runtime, the channel
-/// is [`split`] into a [`StaticSender`]/[`StaticReceiver`] pair in order to
-/// be used.
-///
-/// # Examples
-///
-/// ```
-/// use thingbuf::mpsc::StaticChannel;
-///
-/// // Construct a statically-allocated channel of `usize`s with a capacity
-/// // of 16 messages.
-/// static MY_CHANNEL: StaticChannel<usize, 16> = StaticChannel::new();
-///
-/// fn main() {
-///     // Split the `StaticChannel` into a sender-receiver pair.
-///     let (tx, rx) = MY_CHANNEL.split();
-///
-///     // Now, `tx` and `rx` can be used just like any other async MPSC
-///     // channel...
-/// # drop(tx); drop(rx);
-/// }
-/// ```
-/// [`split`]: StaticChannel::split
-#[cfg_attr(all(loom, test), allow(dead_code))]
-pub struct StaticChannel<T, const CAPACITY: usize, R = recycling::DefaultRecycle> {
-    core: ChannelCore<Waker>,
-    recycle: R,
-    slots: [Slot<T>; CAPACITY],
-    is_split: AtomicBool,
-}
+    // === impl Sender ===
 
-pub struct StaticSender<T: 'static, R: 'static = recycling::DefaultRecycle> {
-    core: &'static ChannelCore<Waker>,
-    recycle: &'static R,
-    slots: &'static [Slot<T>],
-}
+    impl<T, R> Sender<T, R>
+    where
+        R: Recycle<T>,
+    {
+        pub fn try_send_ref(&self) -> Result<SendRef<'_, T>, TrySendError> {
+            self.inner
+                .core
+                .try_send_ref(self.inner.slots.as_ref(), &self.inner.recycle)
+                .map(SendRef)
+        }
 
-pub struct StaticReceiver<T: 'static, R: 'static = recycling::DefaultRecycle> {
-    core: &'static ChannelCore<Waker>,
-    recycle: &'static R,
-    slots: &'static [Slot<T>],
-}
+        pub fn try_send(&self, val: T) -> Result<(), TrySendError<T>> {
+            self.inner
+                .core
+                .try_send(self.inner.slots.as_ref(), val, &self.inner.recycle)
+        }
 
-impl_send_ref! {
-    pub struct SendRef<Waker>;
-}
+        pub async fn send_ref(&self) -> Result<SendRef<'_, T>, Closed> {
+            SendRefFuture {
+                core: &self.inner.core,
+                slots: self.inner.slots.as_ref(),
+                recycle: &self.inner.recycle,
+                state: State::Start,
+                waiter: queue::Waiter::new(),
+            }
+            .await
+        }
 
-impl_recv_ref! {
-    pub struct RecvRef<Waker>;
-}
+        pub async fn send(&self, val: T) -> Result<(), Closed<T>> {
+            match self.send_ref().await {
+                Err(Closed(())) => Err(Closed(val)),
+                Ok(mut slot) => {
+                    slot.with_mut(|slot| *slot = val);
+                    Ok(())
+                }
+            }
+        }
+    }
 
-/// A [`Future`] that tries to receive a reference from a [`Receiver`].
-///
-/// This type is returned by [`Receiver::recv_ref`].
-#[must_use = "futures do nothing unless you `.await` or poll them"]
-pub struct RecvRefFuture<'a, T> {
-    core: &'a ChannelCore<Waker>,
-    slots: &'a [Slot<T>],
-}
+    impl<T, R> Clone for Sender<T, R> {
+        fn clone(&self) -> Self {
+            test_dbg!(self.inner.core.tx_count.fetch_add(1, Ordering::Relaxed));
+            Self {
+                inner: self.inner.clone(),
+            }
+        }
+    }
 
-/// A [`Future`] that tries to receive a value from a [`Receiver`].
-///
-/// This type is returned by [`Receiver::recv`].
-///
-/// This is equivalent to the [`RecvRefFuture`] future, but the value is moved out of
-/// the [`ThingBuf`] after it is received. This means that allocations are not
-/// reused.
-///
-/// [`ThingBuf`]: crate::ThingBuf
-#[must_use = "futures do nothing unless you `.await` or poll them"]
-pub struct RecvFuture<'a, T, R = recycling::DefaultRecycle> {
-    core: &'a ChannelCore<Waker>,
-    slots: &'a [Slot<T>],
-    recycle: &'a R,
-}
+    impl<T, R> Drop for Sender<T, R> {
+        fn drop(&mut self) {
+            if test_dbg!(self.inner.core.tx_count.fetch_sub(1, Ordering::Release)) > 1 {
+                return;
+            }
 
-#[pin_project::pin_project(PinnedDrop)]
-struct SendRefFuture<'sender, T, R> {
-    core: &'sender ChannelCore<Waker>,
-    slots: &'sender [Slot<T>],
-    recycle: &'sender R,
-    state: State,
-    #[pin]
-    waiter: queue::Waiter<Waker>,
-}
+            // if we are the last sender, synchronize
+            test_dbg!(atomic::fence(Ordering::SeqCst));
+            self.inner.core.core.close();
+            self.inner.core.rx_wait.close_tx();
+        }
+    }
 
-#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-enum State {
-    Start,
-    Waiting,
-    Done,
-}
+    // === impl Receiver ===
+
+    impl<T, R> Receiver<T, R> {
+        pub fn recv_ref(&self) -> RecvRefFuture<'_, T> {
+            RecvRefFuture {
+                core: &self.inner.core,
+                slots: self.inner.slots.as_ref(),
+            }
+        }
+
+        pub fn recv(&self) -> RecvFuture<'_, T, R>
+        where
+            R: Recycle<T>,
+        {
+            RecvFuture {
+                core: &self.inner.core,
+                slots: self.inner.slots.as_ref(),
+                recycle: &self.inner.recycle,
+            }
+        }
+
+        /// # Returns
+        ///
+        ///  * `Poll::Pending` if no messages are available but the channel is not
+        ///    closed, or if a spurious failure happens.
+        ///  * `Poll::Ready(Some(Ref<T>))` if a message is available.
+        ///  * `Poll::Ready(None)` if the channel has been closed and all messages
+        ///    sent before it was closed have been received.
+        ///
+        /// When the method returns [`Poll::Pending`], the [`Waker`] in the provided
+        /// [`Context`] is scheduled to receive a wakeup when a message is sent on any
+        /// sender, or when the channel is closed.  Note that on multiple calls to
+        /// `poll_recv_ref`, only the [`Waker`] from the [`Context`] passed to the most
+        /// recent call is scheduled to receive a wakeup.
+        pub fn poll_recv_ref(&self, cx: &mut Context<'_>) -> Poll<Option<RecvRef<'_, T>>> {
+            poll_recv_ref(&self.inner.core, &self.inner.slots, cx)
+        }
+
+        /// # Returns
+        ///
+        ///  * `Poll::Pending` if no messages are available but the channel is not
+        ///    closed, or if a spurious failure happens.
+        ///  * `Poll::Ready(Some(message))` if a message is available.
+        ///  * `Poll::Ready(None)` if the channel has been closed and all messages
+        ///    sent before it was closed have been received.
+        ///
+        /// When the method returns [`Poll::Pending`], the [`Waker`] in the provided
+        /// [`Context`] is scheduled to receive a wakeup when a message is sent on any
+        /// sender, or when the channel is closed.  Note that on multiple calls to
+        /// `poll_recv`, only the [`Waker`] from the [`Context`] passed to the most
+        /// recent call is scheduled to receive a wakeup.
+        pub fn poll_recv(&self, cx: &mut Context<'_>) -> Poll<Option<T>>
+        where
+            R: Recycle<T>,
+        {
+            self.poll_recv_ref(cx)
+                .map(|opt| opt.map(|mut r| recycling::take(&mut *r, &self.inner.recycle)))
+        }
+
+        pub fn is_closed(&self) -> bool {
+            test_dbg!(self.inner.core.tx_count.load(Ordering::SeqCst)) <= 1
+        }
+    }
+
+    impl<T, R> Drop for Receiver<T, R> {
+        fn drop(&mut self) {
+            self.inner.core.close_rx();
+        }
+    }
 
-// === impl StaticChannel ===
+    impl<T, R: fmt::Debug> fmt::Debug for Inner<T, R> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("Inner")
+                .field("core", &self.core)
+                .field("slots", &format_args!("Box<[..]>"))
+                .field("recycle", &self.recycle)
+                .finish()
+        }
+    }
+
+    impl<T, R> Drop for Inner<T, R> {
+        fn drop(&mut self) {
+            self.core.core.drop_slots(&mut self.slots[..])
+        }
+    }
+}
 
 #[cfg(not(all(loom, test)))]
-impl<T, const CAPACITY: usize> StaticChannel<T, CAPACITY> {
-    /// Constructs a new statically-allocated, asynchronous bounded MPSC channel.
+feature! {
+    #![feature = "static"]
+    use crate::loom::atomic::AtomicBool;
+
+    /// A statically-allocated, asynchronous bounded MPSC channel.
     ///
     /// A statically-allocated channel allows using a MPSC channel without
     /// requiring _any_ heap allocations, and can be used in environments that
@@ -194,344 +242,310 @@ impl<T, const CAPACITY: usize> StaticChannel<T, CAPACITY> {
     /// }
     /// ```
     /// [`split`]: StaticChannel::split
-    pub const fn new() -> Self {
-        Self {
-            core: ChannelCore::new(CAPACITY),
-            slots: Slot::make_static_array::<CAPACITY>(),
-            is_split: AtomicBool::new(false),
-            recycle: recycling::DefaultRecycle::new(),
+    pub struct StaticChannel<T, const CAPACITY: usize, R = recycling::DefaultRecycle> {
+        core: ChannelCore<Waker>,
+        recycle: R,
+        slots: [Slot<T>; CAPACITY],
+        is_split: AtomicBool,
+    }
+
+    pub struct StaticSender<T: 'static, R: 'static = recycling::DefaultRecycle> {
+        core: &'static ChannelCore<Waker>,
+        recycle: &'static R,
+        slots: &'static [Slot<T>],
+    }
+
+    pub struct StaticReceiver<T: 'static, R: 'static = recycling::DefaultRecycle> {
+        core: &'static ChannelCore<Waker>,
+        recycle: &'static R,
+        slots: &'static [Slot<T>],
+    }
+
+    // === impl StaticChannel ===
+
+    impl<T, const CAPACITY: usize> StaticChannel<T, CAPACITY> {
+        /// Constructs a new statically-allocated, asynchronous bounded MPSC channel.
+        ///
+        /// A statically-allocated channel allows using a MPSC channel without
+        /// requiring _any_ heap allocations, and can be used in environments that
+        /// don't support `liballoc`.
+        ///
+        /// In order to use a statically-allocated channel, a `StaticChannel` must
+        /// be constructed in a `static` initializer. This reserves storage for the
+        /// channel's message queue at compile-time. Then, at runtime, the channel
+        /// is [`split`] into a [`StaticSender`]/[`StaticReceiver`] pair in order to
+        /// be used.
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use thingbuf::mpsc::StaticChannel;
+        ///
+        /// // Construct a statically-allocated channel of `usize`s with a capacity
+        /// // of 16 messages.
+        /// static MY_CHANNEL: StaticChannel<usize, 16> = StaticChannel::new();
+        ///
+        /// fn main() {
+        ///     // Split the `StaticChannel` into a sender-receiver pair.
+        ///     let (tx, rx) = MY_CHANNEL.split();
+        ///
+        ///     // Now, `tx` and `rx` can be used just like any other async MPSC
+        ///     // channel...
+        /// # drop(tx); drop(rx);
+        /// }
+        /// ```
+        /// [`split`]: StaticChannel::split
+        pub const fn new() -> Self {
+            Self {
+                core: ChannelCore::new(CAPACITY),
+                slots: Slot::make_static_array::<CAPACITY>(),
+                is_split: AtomicBool::new(false),
+                recycle: recycling::DefaultRecycle::new(),
+            }
         }
     }
-}
 
-impl<T, R, const CAPACITY: usize> StaticChannel<T, CAPACITY, R> {
-    /// Split a [`StaticChannel`] into a [`StaticSender`]/[`StaticReceiver`]
-    /// pair.
-    ///
-    /// A static channel can only be split a single time. If
-    /// [`StaticChannel::split`] or [`StaticChannel::try_split`] have been
-    /// called previously, this method will panic. For a non-panicking version
-    /// of this method, see [`StaticChannel::try_split`].
-    ///
-    /// # Panics
-    ///
-    /// If the channel has already been split.
-    pub fn split(&'static self) -> (StaticSender<T, R>, StaticReceiver<T, R>) {
-        self.try_split().expect("channel already split")
-    }
+    impl<T, R, const CAPACITY: usize> StaticChannel<T, CAPACITY, R> {
+        /// Split a [`StaticChannel`] into a [`StaticSender`]/[`StaticReceiver`]
+        /// pair.
+        ///
+        /// A static channel can only be split a single time. If
+        /// [`StaticChannel::split`] or [`StaticChannel::try_split`] have been
+        /// called previously, this method will panic. For a non-panicking version
+        /// of this method, see [`StaticChannel::try_split`].
+        ///
+        /// # Panics
+        ///
+        /// If the channel has already been split.
+        pub fn split(&'static self) -> (StaticSender<T, R>, StaticReceiver<T, R>) {
+            self.try_split().expect("channel already split")
+        }
 
-    /// Try to split a [`StaticChannel`] into a [`StaticSender`]/[`StaticReceiver`]
-    /// pair, returning `None` if it has already been split.
-    ///
-    /// A static channel can only be split a single time. If
-    /// [`StaticChannel::split`] or [`StaticChannel::try_split`] have been
-    /// called previously, this method returns `None`.
-    pub fn try_split(&'static self) -> Option<(StaticSender<T, R>, StaticReceiver<T, R>)> {
-        self.is_split
-            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
-            .ok()?;
-        let tx = StaticSender {
-            core: &self.core,
-            recycle: &self.recycle,
-            slots: &self.slots[..],
-        };
-        let rx = StaticReceiver {
-            core: &self.core,
-            recycle: &self.recycle,
-            slots: &self.slots[..],
-        };
-        Some((tx, rx))
+        /// Try to split a [`StaticChannel`] into a [`StaticSender`]/[`StaticReceiver`]
+        /// pair, returning `None` if it has already been split.
+        ///
+        /// A static channel can only be split a single time. If
+        /// [`StaticChannel::split`] or [`StaticChannel::try_split`] have been
+        /// called previously, this method returns `None`.
+        pub fn try_split(&'static self) -> Option<(StaticSender<T, R>, StaticReceiver<T, R>)> {
+            self.is_split
+                .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+                .ok()?;
+            let tx = StaticSender {
+                core: &self.core,
+                recycle: &self.recycle,
+                slots: &self.slots[..],
+            };
+            let rx = StaticReceiver {
+                core: &self.core,
+                recycle: &self.recycle,
+                slots: &self.slots[..],
+            };
+            Some((tx, rx))
+        }
     }
-}
 
-// === impl Sender ===
+    // === impl StaticSender ===
 
-#[cfg(feature = "alloc")]
-impl<T, R> Sender<T, R>
-where
-    R: Recycle<T>,
-{
-    pub fn try_send_ref(&self) -> Result<SendRef<'_, T>, TrySendError> {
-        self.inner
-            .core
-            .try_send_ref(self.inner.slots.as_ref(), &self.inner.recycle)
-            .map(SendRef)
-    }
-
-    pub fn try_send(&self, val: T) -> Result<(), TrySendError<T>> {
-        self.inner
-            .core
-            .try_send(self.inner.slots.as_ref(), val, &self.inner.recycle)
-    }
+    impl<T, R> StaticSender<T, R>
+    where
+        R: Recycle<T>,
+    {
+        pub fn try_send_ref(&self) -> Result<SendRef<'_, T>, TrySendError> {
+            self.core
+                .try_send_ref(self.slots, self.recycle)
+                .map(SendRef)
+        }
 
-    pub async fn send_ref(&self) -> Result<SendRef<'_, T>, Closed> {
-        SendRefFuture {
-            core: &self.inner.core,
-            slots: self.inner.slots.as_ref(),
-            recycle: &self.inner.recycle,
-            state: State::Start,
-            waiter: queue::Waiter::new(),
+        pub fn try_send(&self, val: T) -> Result<(), TrySendError<T>> {
+            self.core.try_send(self.slots, val, self.recycle)
         }
-        .await
-    }
 
-    pub async fn send(&self, val: T) -> Result<(), Closed<T>> {
-        match self.send_ref().await {
-            Err(Closed(())) => Err(Closed(val)),
-            Ok(mut slot) => {
-                slot.with_mut(|slot| *slot = val);
-                Ok(())
+        pub async fn send_ref(&self) -> Result<SendRef<'_, T>, Closed> {
+            SendRefFuture {
+                core: self.core,
+                slots: self.slots,
+                recycle: self.recycle,
+                state: State::Start,
+                waiter: queue::Waiter::new(),
             }
+            .await
         }
-    }
-}
 
-#[cfg(feature = "alloc")]
-impl<T, R> Clone for Sender<T, R> {
-    fn clone(&self) -> Self {
-        test_dbg!(self.inner.core.tx_count.fetch_add(1, Ordering::Relaxed));
-        Self {
-            inner: self.inner.clone(),
+        pub async fn send(&self, val: T) -> Result<(), Closed<T>> {
+            match self.send_ref().await {
+                Err(Closed(())) => Err(Closed(val)),
+                Ok(mut slot) => {
+                    slot.with_mut(|slot| *slot = val);
+                    Ok(())
+                }
+            }
         }
     }
-}
 
-#[cfg(feature = "alloc")]
-impl<T, R> Drop for Sender<T, R> {
-    fn drop(&mut self) {
-        if test_dbg!(self.inner.core.tx_count.fetch_sub(1, Ordering::Release)) > 1 {
-            return;
+    impl<T> Clone for StaticSender<T> {
+        fn clone(&self) -> Self {
+            test_dbg!(self.core.tx_count.fetch_add(1, Ordering::Relaxed));
+            Self {
+                core: self.core,
+                slots: self.slots,
+                recycle: self.recycle,
+            }
         }
-
-        // if we are the last sender, synchronize
-        test_dbg!(atomic::fence(Ordering::SeqCst));
-        self.inner.core.core.close();
-        self.inner.core.rx_wait.close_tx();
     }
-}
 
-// === impl Receiver ===
+    impl<T, R> Drop for StaticSender<T, R> {
+        fn drop(&mut self) {
+            if test_dbg!(self.core.tx_count.fetch_sub(1, Ordering::Release)) > 1 {
+                return;
+            }
 
-#[cfg(feature = "alloc")]
-impl<T, R> Receiver<T, R> {
-    pub fn recv_ref(&self) -> RecvRefFuture<'_, T> {
-        RecvRefFuture {
-            core: &self.inner.core,
-            slots: self.inner.slots.as_ref(),
+            // if we are the last sender, synchronize
+            test_dbg!(atomic::fence(Ordering::SeqCst));
+            self.core.core.close();
+            self.core.rx_wait.close_tx();
         }
     }
 
-    pub fn recv(&self) -> RecvFuture<'_, T, R>
-    where
-        R: Recycle<T>,
-    {
-        RecvFuture {
-            core: &self.inner.core,
-            slots: self.inner.slots.as_ref(),
-            recycle: &self.inner.recycle,
+    impl<T, R: fmt::Debug> fmt::Debug for StaticSender<T, R> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("StaticSender")
+                .field("core", &self.core)
+                .field("slots", &format_args!("&[..]"))
+                .field("recycle", self.recycle)
+                .finish()
         }
     }
 
-    /// # Returns
-    ///
-    ///  * `Poll::Pending` if no messages are available but the channel is not
-    ///    closed, or if a spurious failure happens.
-    ///  * `Poll::Ready(Some(Ref<T>))` if a message is available.
-    ///  * `Poll::Ready(None)` if the channel has been closed and all messages
-    ///    sent before it was closed have been received.
-    ///
-    /// When the method returns [`Poll::Pending`], the [`Waker`] in the provided
-    /// [`Context`] is scheduled to receive a wakeup when a message is sent on any
-    /// sender, or when the channel is closed.  Note that on multiple calls to
-    /// `poll_recv_ref`, only the [`Waker`] from the [`Context`] passed to the most
-    /// recent call is scheduled to receive a wakeup.
-    pub fn poll_recv_ref(&self, cx: &mut Context<'_>) -> Poll<Option<RecvRef<'_, T>>> {
-        poll_recv_ref(&self.inner.core, &self.inner.slots, cx)
-    }
+    // === impl StaticReceiver ===
 
-    /// # Returns
-    ///
-    ///  * `Poll::Pending` if no messages are available but the channel is not
-    ///    closed, or if a spurious failure happens.
-    ///  * `Poll::Ready(Some(message))` if a message is available.
-    ///  * `Poll::Ready(None)` if the channel has been closed and all messages
-    ///    sent before it was closed have been received.
-    ///
-    /// When the method returns [`Poll::Pending`], the [`Waker`] in the provided
-    /// [`Context`] is scheduled to receive a wakeup when a message is sent on any
-    /// sender, or when the channel is closed.  Note that on multiple calls to
-    /// `poll_recv`, only the [`Waker`] from the [`Context`] passed to the most
-    /// recent call is scheduled to receive a wakeup.
-    pub fn poll_recv(&self, cx: &mut Context<'_>) -> Poll<Option<T>>
-    where
-        R: Recycle<T>,
-    {
-        self.poll_recv_ref(cx)
-            .map(|opt| opt.map(|mut r| recycling::take(&mut *r, &self.inner.recycle)))
-    }
-
-    pub fn is_closed(&self) -> bool {
-        test_dbg!(self.inner.core.tx_count.load(Ordering::SeqCst)) <= 1
-    }
-}
-
-#[cfg(feature = "alloc")]
-impl<T, R> Drop for Receiver<T, R> {
-    fn drop(&mut self) {
-        self.inner.core.close_rx();
-    }
-}
-
-// === impl StaticSender ===
-
-impl<T, R> StaticSender<T, R>
-where
-    R: Recycle<T>,
-{
-    pub fn try_send_ref(&self) -> Result<SendRef<'_, T>, TrySendError> {
-        self.core
-            .try_send_ref(self.slots, self.recycle)
-            .map(SendRef)
-    }
-
-    pub fn try_send(&self, val: T) -> Result<(), TrySendError<T>> {
-        self.core.try_send(self.slots, val, self.recycle)
-    }
-
-    pub async fn send_ref(&self) -> Result<SendRef<'_, T>, Closed> {
-        SendRefFuture {
-            core: self.core,
-            slots: self.slots,
-            recycle: self.recycle,
-            state: State::Start,
-            waiter: queue::Waiter::new(),
+    impl<T, R> StaticReceiver<T, R> {
+        pub fn recv_ref(&self) -> RecvRefFuture<'_, T> {
+            RecvRefFuture {
+                core: self.core,
+                slots: self.slots,
+            }
         }
-        .await
-    }
 
-    pub async fn send(&self, val: T) -> Result<(), Closed<T>> {
-        match self.send_ref().await {
-            Err(Closed(())) => Err(Closed(val)),
-            Ok(mut slot) => {
-                slot.with_mut(|slot| *slot = val);
-                Ok(())
+        pub fn recv(&self) -> RecvFuture<'_, T, R>
+        where
+            R: Recycle<T>,
+        {
+            RecvFuture {
+                core: self.core,
+                slots: self.slots,
+                recycle: self.recycle,
             }
         }
-    }
-}
 
-impl<T> Clone for StaticSender<T> {
-    fn clone(&self) -> Self {
-        test_dbg!(self.core.tx_count.fetch_add(1, Ordering::Relaxed));
-        Self {
-            core: self.core,
-            slots: self.slots,
-            recycle: self.recycle,
+        /// # Returns
+        ///
+        ///  * `Poll::Pending` if no messages are available but the channel is not
+        ///    closed, or if a spurious failure happens.
+        ///  * `Poll::Ready(Some(Ref<T>))` if a message is available.
+        ///  * `Poll::Ready(None)` if the channel has been closed and all messages
+        ///    sent before it was closed have been received.
+        ///
+        /// When the method returns [`Poll::Pending`], the [`Waker`] in the provided
+        /// [`Context`] is scheduled to receive a wakeup when a message is sent on any
+        /// sender, or when the channel is closed.  Note that on multiple calls to
+        /// `poll_recv_ref`, only the [`Waker`] from the [`Context`] passed to the most
+        /// recent call is scheduled to receive a wakeup.
+        pub fn poll_recv_ref(&self, cx: &mut Context<'_>) -> Poll<Option<RecvRef<'_, T>>> {
+            poll_recv_ref(self.core, self.slots, cx)
         }
-    }
-}
 
-impl<T, R> Drop for StaticSender<T, R> {
-    fn drop(&mut self) {
-        if test_dbg!(self.core.tx_count.fetch_sub(1, Ordering::Release)) > 1 {
-            return;
+        /// # Returns
+        ///
+        ///  * `Poll::Pending` if no messages are available but the channel is not
+        ///    closed, or if a spurious failure happens.
+        ///  * `Poll::Ready(Some(message))` if a message is available.
+        ///  * `Poll::Ready(None)` if the channel has been closed and all messages
+        ///    sent before it was closed have been received.
+        ///
+        /// When the method returns [`Poll::Pending`], the [`Waker`] in the provided
+        /// [`Context`] is scheduled to receive a wakeup when a message is sent on any
+        /// sender, or when the channel is closed.  Note that on multiple calls to
+        /// `poll_recv`, only the [`Waker`] from the [`Context`] passed to the most
+        /// recent call is scheduled to receive a wakeup.
+        pub fn poll_recv(&self, cx: &mut Context<'_>) -> Poll<Option<T>>
+        where
+            R: Recycle<T>,
+        {
+            self.poll_recv_ref(cx)
+                .map(|opt| opt.map(|mut r| recycling::take(&mut *r, self.recycle)))
         }
 
-        // if we are the last sender, synchronize
-        test_dbg!(atomic::fence(Ordering::SeqCst));
-        self.core.core.close();
-        self.core.rx_wait.close_tx();
-    }
-}
-
-impl<T, R: fmt::Debug> fmt::Debug for StaticSender<T, R> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("StaticSender")
-            .field("core", &self.core)
-            .field("slots", &format_args!("&[..]"))
-            .field("recycle", self.recycle)
-            .finish()
+        pub fn is_closed(&self) -> bool {
+            test_dbg!(self.core.tx_count.load(Ordering::SeqCst)) <= 1
+        }
     }
-}
 
-// === impl StaticReceiver ===
-
-impl<T, R> StaticReceiver<T, R> {
-    pub fn recv_ref(&self) -> RecvRefFuture<'_, T> {
-        RecvRefFuture {
-            core: self.core,
-            slots: self.slots,
+    impl<T, R> Drop for StaticReceiver<T, R> {
+        fn drop(&mut self) {
+            self.core.close_rx();
         }
     }
 
-    pub fn recv(&self) -> RecvFuture<'_, T, R>
-    where
-        R: Recycle<T>,
-    {
-        RecvFuture {
-            core: self.core,
-            slots: self.slots,
-            recycle: self.recycle,
+    impl<T, R: fmt::Debug> fmt::Debug for StaticReceiver<T, R> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("StaticReceiver")
+                .field("core", &self.core)
+                .field("slots", &format_args!("&[..]"))
+                .field("recycle", &self.recycle)
+                .finish()
         }
     }
+}
 
-    /// # Returns
-    ///
-    ///  * `Poll::Pending` if no messages are available but the channel is not
-    ///    closed, or if a spurious failure happens.
-    ///  * `Poll::Ready(Some(Ref<T>))` if a message is available.
-    ///  * `Poll::Ready(None)` if the channel has been closed and all messages
-    ///    sent before it was closed have been received.
-    ///
-    /// When the method returns [`Poll::Pending`], the [`Waker`] in the provided
-    /// [`Context`] is scheduled to receive a wakeup when a message is sent on any
-    /// sender, or when the channel is closed.  Note that on multiple calls to
-    /// `poll_recv_ref`, only the [`Waker`] from the [`Context`] passed to the most
-    /// recent call is scheduled to receive a wakeup.
-    pub fn poll_recv_ref(&self, cx: &mut Context<'_>) -> Poll<Option<RecvRef<'_, T>>> {
-        poll_recv_ref(self.core, self.slots, cx)
-    }
+impl_send_ref! {
+    pub struct SendRef<Waker>;
+}
 
-    /// # Returns
-    ///
-    ///  * `Poll::Pending` if no messages are available but the channel is not
-    ///    closed, or if a spurious failure happens.
-    ///  * `Poll::Ready(Some(message))` if a message is available.
-    ///  * `Poll::Ready(None)` if the channel has been closed and all messages
-    ///    sent before it was closed have been received.
-    ///
-    /// When the method returns [`Poll::Pending`], the [`Waker`] in the provided
-    /// [`Context`] is scheduled to receive a wakeup when a message is sent on any
-    /// sender, or when the channel is closed.  Note that on multiple calls to
-    /// `poll_recv`, only the [`Waker`] from the [`Context`] passed to the most
-    /// recent call is scheduled to receive a wakeup.
-    pub fn poll_recv(&self, cx: &mut Context<'_>) -> Poll<Option<T>>
-    where
-        R: Recycle<T>,
-    {
-        self.poll_recv_ref(cx)
-            .map(|opt| opt.map(|mut r| recycling::take(&mut *r, self.recycle)))
-    }
+impl_recv_ref! {
+    pub struct RecvRef<Waker>;
+}
 
-    pub fn is_closed(&self) -> bool {
-        test_dbg!(self.core.tx_count.load(Ordering::SeqCst)) <= 1
-    }
+/// A [`Future`] that tries to receive a reference from a [`Receiver`].
+///
+/// This type is returned by [`Receiver::recv_ref`].
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct RecvRefFuture<'a, T> {
+    core: &'a ChannelCore<Waker>,
+    slots: &'a [Slot<T>],
 }
 
-impl<T, R> Drop for StaticReceiver<T, R> {
-    fn drop(&mut self) {
-        self.core.close_rx();
-    }
+/// A [`Future`] that tries to receive a value from a [`Receiver`].
+///
+/// This type is returned by [`Receiver::recv`].
+///
+/// This is equivalent to the [`RecvRefFuture`] future, but the value is moved out of
+/// the [`ThingBuf`] after it is received. This means that allocations are not
+/// reused.
+///
+/// [`ThingBuf`]: crate::ThingBuf
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct RecvFuture<'a, T, R = recycling::DefaultRecycle> {
+    core: &'a ChannelCore<Waker>,
+    slots: &'a [Slot<T>],
+    recycle: &'a R,
 }
 
-impl<T, R: fmt::Debug> fmt::Debug for StaticReceiver<T, R> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("StaticReceiver")
-            .field("core", &self.core)
-            .field("slots", &format_args!("&[..]"))
-            .field("recycle", &self.recycle)
-            .finish()
-    }
+#[pin_project::pin_project(PinnedDrop)]
+struct SendRefFuture<'sender, T, R> {
+    core: &'sender ChannelCore<Waker>,
+    slots: &'sender [Slot<T>],
+    recycle: &'sender R,
+    state: State,
+    #[pin]
+    waiter: queue::Waiter<Waker>,
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+enum State {
+    Start,
+    Waiting,
+    Done,
 }
 
 // === impl RecvRefFuture ===
@@ -650,25 +664,6 @@ impl<T, R> PinnedDrop for SendRefFuture<'_, T, R> {
     }
 }
 
-feature! {
-    #![feature = "alloc"]
-    impl<T, R: fmt::Debug> fmt::Debug for Inner<T, R> {
-        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-            f.debug_struct("Inner")
-                .field("core", &self.core)
-                .field("slots", &format_args!("Box<[..]>"))
-                .field("recycle", &self.recycle)
-                .finish()
-        }
-    }
-
-    impl<T, R> Drop for Inner<T, R> {
-        fn drop(&mut self) {
-            self.core.core.drop_slots(&mut self.slots[..])
-        }
-    }
-}
-
 #[cfg(feature = "alloc")]
 #[cfg(test)]
 mod tests {

+ 227 - 222
src/mpsc/sync.rs

@@ -6,7 +6,7 @@
 use super::*;
 use crate::{
     loom::{
-        atomic::{self, AtomicBool, Ordering},
+        atomic::{self, Ordering},
         sync::Arc,
         thread::{self, Thread},
     },
@@ -53,83 +53,19 @@ pub struct Receiver<T, R = recycling::DefaultRecycle> {
     inner: Arc<Inner<T, R>>,
 }
 
-/// A statically-allocated, blocking bounded MPSC channel.
-///
-/// A statically-allocated channel allows using a MPSC channel without
-/// requiring _any_ heap allocations. The [asynchronous variant][async] may be
-/// used in `#![no_std]` environments without requiring `liballoc`. This is a
-/// synchronous version which requires the Rust standard library, because it
-/// blocks the current thread in order to wait for send capacity. However, in
-/// some cases, it may offer _very slightly_ better performance than the
-/// non-static blocking channel due to requiring fewer heap pointer
-/// dereferences.
-///
-/// In order to use a statically-allocated channel, a `StaticChannel` must
-/// be constructed in a `static` initializer. This reserves storage for the
-/// channel's message queue at compile-time. Then, at runtime, the channel
-/// is [`split`] into a [`StaticSender`]/[`StaticReceiver`] pair in order to
-/// be used.
-///
-/// # Examples
-///
-/// ```
-/// use thingbuf::mpsc::StaticChannel;
-///
-/// // Construct a statically-allocated channel of `usize`s with a capacity
-/// // of 16 messages.
-/// static MY_CHANNEL: StaticChannel<usize, 16> = StaticChannel::new();
-///
-/// fn main() {
-///     // Split the `StaticChannel` into a sender-receiver pair.
-///     let (tx, rx) = MY_CHANNEL.split();
-///
-///     // Now, `tx` and `rx` can be used just like any other async MPSC
-///     // channel...
-/// # drop(tx); drop(rx);
-/// }
-/// ```
-///
-/// [async]: crate::mpsc::StaticChannel
-/// [`split`]: StaticChannel::split
-#[cfg_attr(all(loom, test), allow(dead_code))]
-pub struct StaticChannel<T, const CAPACITY: usize, R = recycling::DefaultRecycle> {
-    core: ChannelCore<Thread>,
-    slots: [Slot<T>; CAPACITY],
-    is_split: AtomicBool,
-    recycle: R,
-}
-
-pub struct StaticSender<T: 'static, R: 'static = recycling::DefaultRecycle> {
-    core: &'static ChannelCore<Thread>,
-    slots: &'static [Slot<T>],
-    recycle: &'static R,
-}
-
-pub struct StaticReceiver<T: 'static, R: 'static = recycling::DefaultRecycle> {
-    core: &'static ChannelCore<Thread>,
-    slots: &'static [Slot<T>],
-    recycle: &'static R,
-}
-
 struct Inner<T, R> {
     core: super::ChannelCore<Thread>,
     slots: Box<[Slot<T>]>,
     recycle: R,
 }
 
-impl_send_ref! {
-    pub struct SendRef<Thread>;
-}
-
-impl_recv_ref! {
-    pub struct RecvRef<Thread>;
-}
+#[cfg(not(all(loom, test)))]
+feature! {
+    #![feature = "static"]
 
-// === impl StaticChannel ===
+    use crate::loom::atomic::AtomicBool;
 
-#[cfg(not(all(loom, test)))]
-impl<T, const CAPACITY: usize> StaticChannel<T, CAPACITY> {
-    /// Constructs a new statically-allocated, blocking bounded MPSC channel.
+    /// A statically-allocated, blocking bounded MPSC channel.
     ///
     /// A statically-allocated channel allows using a MPSC channel without
     /// requiring _any_ heap allocations. The [asynchronous variant][async] may be
@@ -167,169 +103,260 @@ impl<T, const CAPACITY: usize> StaticChannel<T, CAPACITY> {
     ///
     /// [async]: crate::mpsc::StaticChannel
     /// [`split`]: StaticChannel::split
-    pub const fn new() -> Self {
-        Self {
-            core: ChannelCore::new(CAPACITY),
-            slots: Slot::make_static_array::<CAPACITY>(),
-            is_split: AtomicBool::new(false),
-            recycle: recycling::DefaultRecycle::new(),
+    pub struct StaticChannel<T, const CAPACITY: usize, R = recycling::DefaultRecycle> {
+        core: ChannelCore<Thread>,
+        slots: [Slot<T>; CAPACITY],
+        is_split: AtomicBool,
+        recycle: R,
+    }
+
+    pub struct StaticSender<T: 'static, R: 'static = recycling::DefaultRecycle> {
+        core: &'static ChannelCore<Thread>,
+        slots: &'static [Slot<T>],
+        recycle: &'static R,
+    }
+
+    pub struct StaticReceiver<T: 'static, R: 'static = recycling::DefaultRecycle> {
+        core: &'static ChannelCore<Thread>,
+        slots: &'static [Slot<T>],
+        recycle: &'static R,
+    }
+
+    // === impl StaticChannel ===
+
+    impl<T, const CAPACITY: usize> StaticChannel<T, CAPACITY> {
+        /// Constructs a new statically-allocated, blocking bounded MPSC channel.
+        ///
+        /// A statically-allocated channel allows using a MPSC channel without
+        /// requiring _any_ heap allocations. The [asynchronous variant][async] may be
+        /// used in `#![no_std]` environments without requiring `liballoc`. This is a
+        /// synchronous version which requires the Rust standard library, because it
+        /// blocks the current thread in order to wait for send capacity. However, in
+        /// some cases, it may offer _very slightly_ better performance than the
+        /// non-static blocking channel due to requiring fewer heap pointer
+        /// dereferences.
+        ///
+        /// In order to use a statically-allocated channel, a `StaticChannel` must
+        /// be constructed in a `static` initializer. This reserves storage for the
+        /// channel's message queue at compile-time. Then, at runtime, the channel
+        /// is [`split`] into a [`StaticSender`]/[`StaticReceiver`] pair in order to
+        /// be used.
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use thingbuf::mpsc::StaticChannel;
+        ///
+        /// // Construct a statically-allocated channel of `usize`s with a capacity
+        /// // of 16 messages.
+        /// static MY_CHANNEL: StaticChannel<usize, 16> = StaticChannel::new();
+        ///
+        /// fn main() {
+        ///     // Split the `StaticChannel` into a sender-receiver pair.
+        ///     let (tx, rx) = MY_CHANNEL.split();
+        ///
+        ///     // Now, `tx` and `rx` can be used just like any other async MPSC
+        ///     // channel...
+        /// # drop(tx); drop(rx);
+        /// }
+        /// ```
+        ///
+        /// [async]: crate::mpsc::StaticChannel
+        /// [`split`]: StaticChannel::split
+        pub const fn new() -> Self {
+            Self {
+                core: ChannelCore::new(CAPACITY),
+                slots: Slot::make_static_array::<CAPACITY>(),
+                is_split: AtomicBool::new(false),
+                recycle: recycling::DefaultRecycle::new(),
+            }
         }
     }
-}
 
-impl<T, R, const CAPACITY: usize> StaticChannel<T, CAPACITY, R> {
-    /// Split a [`StaticChannel`] into a [`StaticSender`]/[`StaticReceiver`]
-    /// pair.
-    ///
-    /// A static channel can only be split a single time. If
-    /// [`StaticChannel::split`] or [`StaticChannel::try_split`] have been
-    /// called previously, this method will panic. For a non-panicking version
-    /// of this method, see [`StaticChannel::try_split`].
-    ///
-    /// # Panics
-    ///
-    /// If the channel has already been split.
-    pub fn split(&'static self) -> (StaticSender<T, R>, StaticReceiver<T, R>) {
-        self.try_split().expect("channel already split")
-    }
+    impl<T, R, const CAPACITY: usize> StaticChannel<T, CAPACITY, R> {
+        /// Split a [`StaticChannel`] into a [`StaticSender`]/[`StaticReceiver`]
+        /// pair.
+        ///
+        /// A static channel can only be split a single time. If
+        /// [`StaticChannel::split`] or [`StaticChannel::try_split`] have been
+        /// called previously, this method will panic. For a non-panicking version
+        /// of this method, see [`StaticChannel::try_split`].
+        ///
+        /// # Panics
+        ///
+        /// If the channel has already been split.
+        pub fn split(&'static self) -> (StaticSender<T, R>, StaticReceiver<T, R>) {
+            self.try_split().expect("channel already split")
+        }
 
-    /// Try to split a [`StaticChannel`] into a [`StaticSender`]/[`StaticReceiver`]
-    /// pair, returning `None` if it has already been split.
-    ///
-    /// A static channel can only be split a single time. If
-    /// [`StaticChannel::split`] or [`StaticChannel::try_split`] have been
-    /// called previously, this method returns `None`.
-    pub fn try_split(&'static self) -> Option<(StaticSender<T, R>, StaticReceiver<T, R>)> {
-        self.is_split
-            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
-            .ok()?;
-        let tx = StaticSender {
-            core: &self.core,
-            slots: &self.slots[..],
-            recycle: &self.recycle,
-        };
-        let rx = StaticReceiver {
-            core: &self.core,
-            slots: &self.slots[..],
-            recycle: &self.recycle,
-        };
-        Some((tx, rx))
+        /// Try to split a [`StaticChannel`] into a [`StaticSender`]/[`StaticReceiver`]
+        /// pair, returning `None` if it has already been split.
+        ///
+        /// A static channel can only be split a single time. If
+        /// [`StaticChannel::split`] or [`StaticChannel::try_split`] have been
+        /// called previously, this method returns `None`.
+        pub fn try_split(&'static self) -> Option<(StaticSender<T, R>, StaticReceiver<T, R>)> {
+            self.is_split
+                .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+                .ok()?;
+            let tx = StaticSender {
+                core: &self.core,
+                slots: &self.slots[..],
+                recycle: &self.recycle,
+            };
+            let rx = StaticReceiver {
+                core: &self.core,
+                slots: &self.slots[..],
+                recycle: &self.recycle,
+            };
+            Some((tx, rx))
+        }
     }
-}
 
-// === impl Sender ===
+    // === impl StaticSender ===
 
-impl<T, R> Sender<T, R>
-where
-    R: Recycle<T>,
-{
-    pub fn try_send_ref(&self) -> Result<SendRef<'_, T>, TrySendError> {
-        self.inner
-            .core
-            .try_send_ref(self.inner.slots.as_ref(), &self.inner.recycle)
-            .map(SendRef)
-    }
+    impl<T, R> StaticSender<T, R>
+    where
+        R: Recycle<T>,
+    {
+        pub fn try_send_ref(&self) -> Result<SendRef<'_, T>, TrySendError> {
+            self.core
+                .try_send_ref(self.slots, self.recycle)
+                .map(SendRef)
+        }
 
-    pub fn try_send(&self, val: T) -> Result<(), TrySendError<T>> {
-        self.inner
-            .core
-            .try_send(self.inner.slots.as_ref(), val, &self.inner.recycle)
+        pub fn try_send(&self, val: T) -> Result<(), TrySendError<T>> {
+            self.core.try_send(self.slots, val, self.recycle)
+        }
+
+        pub fn send_ref(&self) -> Result<SendRef<'_, T>, Closed> {
+            send_ref(self.core, self.slots, self.recycle)
+        }
+
+        pub fn send(&self, val: T) -> Result<(), Closed<T>> {
+            match self.send_ref() {
+                Err(Closed(())) => Err(Closed(val)),
+                Ok(mut slot) => {
+                    slot.with_mut(|slot| *slot = val);
+                    Ok(())
+                }
+            }
+        }
     }
 
-    pub fn send_ref(&self) -> Result<SendRef<'_, T>, Closed> {
-        send_ref(
-            &self.inner.core,
-            self.inner.slots.as_ref(),
-            &self.inner.recycle,
-        )
+    impl<T, R> Clone for StaticSender<T, R> {
+        fn clone(&self) -> Self {
+            test_dbg!(self.core.tx_count.fetch_add(1, Ordering::Relaxed));
+            Self {
+                core: self.core,
+                slots: self.slots,
+                recycle: self.recycle,
+            }
+        }
     }
 
-    pub fn send(&self, val: T) -> Result<(), Closed<T>> {
-        match self.send_ref() {
-            Err(Closed(())) => Err(Closed(val)),
-            Ok(mut slot) => {
-                slot.with_mut(|slot| *slot = val);
-                Ok(())
+    impl<T, R> Drop for StaticSender<T, R> {
+        fn drop(&mut self) {
+            if test_dbg!(self.core.tx_count.fetch_sub(1, Ordering::Release)) > 1 {
+                return;
+            }
+
+            // if we are the last sender, synchronize
+            test_dbg!(atomic::fence(Ordering::SeqCst));
+            if self.core.core.close() {
+                self.core.rx_wait.close_tx();
             }
         }
     }
-}
 
-impl<T, R> Clone for Sender<T, R> {
-    fn clone(&self) -> Self {
-        test_dbg!(self.inner.core.tx_count.fetch_add(1, Ordering::Relaxed));
-        Self {
-            inner: self.inner.clone(),
+    impl<T, R: fmt::Debug> fmt::Debug for StaticSender<T, R> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("StaticSender")
+                .field("core", &self.core)
+                .field("slots", &format_args!("&[..]"))
+                .field("recycle", &self.recycle)
+                .finish()
         }
     }
-}
 
-impl<T, R> Drop for Sender<T, R> {
-    fn drop(&mut self) {
-        if test_dbg!(self.inner.core.tx_count.fetch_sub(1, Ordering::Release)) > 1 {
-            return;
+    // === impl StaticReceiver ===
+
+    impl<T, R> StaticReceiver<T, R> {
+        pub fn recv_ref(&self) -> Option<RecvRef<'_, T>> {
+            recv_ref(self.core, self.slots)
         }
 
-        // if we are the last sender, synchronize
-        test_dbg!(atomic::fence(Ordering::SeqCst));
-        if self.inner.core.core.close() {
-            self.inner.core.rx_wait.close_tx();
+        pub fn recv(&self) -> Option<T>
+        where
+            R: Recycle<T>,
+        {
+            let mut val = self.recv_ref()?;
+            Some(recycling::take(&mut *val, self.recycle))
+        }
+
+        pub fn is_closed(&self) -> bool {
+            test_dbg!(self.core.tx_count.load(Ordering::SeqCst)) <= 1
         }
     }
-}
 
-// === impl Receiver ===
+    impl<'a, T, R> Iterator for &'a StaticReceiver<T, R> {
+        type Item = RecvRef<'a, T>;
 
-impl<T, R> Receiver<T, R> {
-    pub fn recv_ref(&self) -> Option<RecvRef<'_, T>> {
-        recv_ref(&self.inner.core, self.inner.slots.as_ref())
+        fn next(&mut self) -> Option<Self::Item> {
+            self.recv_ref()
+        }
     }
 
-    pub fn recv(&self) -> Option<T>
-    where
-        R: Recycle<T>,
-    {
-        let mut val = self.recv_ref()?;
-        Some(recycling::take(&mut *val, &self.inner.recycle))
+    impl<T, R> Drop for StaticReceiver<T, R> {
+        fn drop(&mut self) {
+            self.core.close_rx();
+        }
     }
 
-    pub fn is_closed(&self) -> bool {
-        test_dbg!(self.inner.core.tx_count.load(Ordering::SeqCst)) <= 1
+    impl<T, R: fmt::Debug> fmt::Debug for StaticReceiver<T, R> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("StaticReceiver")
+                .field("core", &self.core)
+                .field("slots", &format_args!("&[..]"))
+                .field("recycle", &self.recycle)
+                .finish()
+        }
     }
 }
 
-impl<'a, T, R> Iterator for &'a Receiver<T, R> {
-    type Item = RecvRef<'a, T>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        self.recv_ref()
-    }
+impl_send_ref! {
+    pub struct SendRef<Thread>;
 }
 
-impl<T, R> Drop for Receiver<T, R> {
-    fn drop(&mut self) {
-        self.inner.core.close_rx();
-    }
+impl_recv_ref! {
+    pub struct RecvRef<Thread>;
 }
 
-// === impl StaticSender ===
+// === impl Sender ===
 
-impl<T, R> StaticSender<T, R>
+impl<T, R> Sender<T, R>
 where
     R: Recycle<T>,
 {
     pub fn try_send_ref(&self) -> Result<SendRef<'_, T>, TrySendError> {
-        self.core
-            .try_send_ref(self.slots, self.recycle)
+        self.inner
+            .core
+            .try_send_ref(self.inner.slots.as_ref(), &self.inner.recycle)
             .map(SendRef)
     }
 
     pub fn try_send(&self, val: T) -> Result<(), TrySendError<T>> {
-        self.core.try_send(self.slots, val, self.recycle)
+        self.inner
+            .core
+            .try_send(self.inner.slots.as_ref(), val, &self.inner.recycle)
     }
 
     pub fn send_ref(&self) -> Result<SendRef<'_, T>, Closed> {
-        send_ref(self.core, self.slots, self.recycle)
+        send_ref(
+            &self.inner.core,
+            self.inner.slots.as_ref(),
+            &self.inner.recycle,
+        )
     }
 
     pub fn send(&self, val: T) -> Result<(), Closed<T>> {
@@ -343,46 +370,34 @@ where
     }
 }
 
-impl<T, R> Clone for StaticSender<T, R> {
+impl<T, R> Clone for Sender<T, R> {
     fn clone(&self) -> Self {
-        test_dbg!(self.core.tx_count.fetch_add(1, Ordering::Relaxed));
+        test_dbg!(self.inner.core.tx_count.fetch_add(1, Ordering::Relaxed));
         Self {
-            core: self.core,
-            slots: self.slots,
-            recycle: self.recycle,
+            inner: self.inner.clone(),
         }
     }
 }
 
-impl<T, R> Drop for StaticSender<T, R> {
+impl<T, R> Drop for Sender<T, R> {
     fn drop(&mut self) {
-        if test_dbg!(self.core.tx_count.fetch_sub(1, Ordering::Release)) > 1 {
+        if test_dbg!(self.inner.core.tx_count.fetch_sub(1, Ordering::Release)) > 1 {
             return;
         }
 
         // if we are the last sender, synchronize
         test_dbg!(atomic::fence(Ordering::SeqCst));
-        if self.core.core.close() {
-            self.core.rx_wait.close_tx();
+        if self.inner.core.core.close() {
+            self.inner.core.rx_wait.close_tx();
         }
     }
 }
 
-impl<T, R: fmt::Debug> fmt::Debug for StaticSender<T, R> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("StaticSender")
-            .field("core", &self.core)
-            .field("slots", &format_args!("&[..]"))
-            .field("recycle", &self.recycle)
-            .finish()
-    }
-}
-
-// === impl StaticReceiver ===
+// === impl Receiver ===
 
-impl<T, R> StaticReceiver<T, R> {
+impl<T, R> Receiver<T, R> {
     pub fn recv_ref(&self) -> Option<RecvRef<'_, T>> {
-        recv_ref(self.core, self.slots)
+        recv_ref(&self.inner.core, self.inner.slots.as_ref())
     }
 
     pub fn recv(&self) -> Option<T>
@@ -390,15 +405,15 @@ impl<T, R> StaticReceiver<T, R> {
         R: Recycle<T>,
     {
         let mut val = self.recv_ref()?;
-        Some(recycling::take(&mut *val, self.recycle))
+        Some(recycling::take(&mut *val, &self.inner.recycle))
     }
 
     pub fn is_closed(&self) -> bool {
-        test_dbg!(self.core.tx_count.load(Ordering::SeqCst)) <= 1
+        test_dbg!(self.inner.core.tx_count.load(Ordering::SeqCst)) <= 1
     }
 }
 
-impl<'a, T, R> Iterator for &'a StaticReceiver<T, R> {
+impl<'a, T, R> Iterator for &'a Receiver<T, R> {
     type Item = RecvRef<'a, T>;
 
     fn next(&mut self) -> Option<Self::Item> {
@@ -406,19 +421,9 @@ impl<'a, T, R> Iterator for &'a StaticReceiver<T, R> {
     }
 }
 
-impl<T, R> Drop for StaticReceiver<T, R> {
+impl<T, R> Drop for Receiver<T, R> {
     fn drop(&mut self) {
-        self.core.close_rx();
-    }
-}
-
-impl<T, R: fmt::Debug> fmt::Debug for StaticReceiver<T, R> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("StaticReceiver")
-            .field("core", &self.core)
-            .field("slots", &format_args!("&[..]"))
-            .field("recycle", &self.recycle)
-            .finish()
+        self.inner.core.close_rx();
     }
 }
 

+ 1 - 0
src/static_thingbuf.rs

@@ -191,6 +191,7 @@ use core::fmt;
 /// [`ThingBuf`]: crate::ThingBuf
 /// [vyukov]: https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
 /// [object pool]: https://en.wikipedia.org/wiki/Object_pool_pattern
+#[cfg_attr(docsrs, doc(cfg(feature = "static")))]
 pub struct StaticThingBuf<T, const CAP: usize, R = recycling::DefaultRecycle> {
     core: Core,
     recycle: R,

+ 1 - 0
tests/static_storage.rs

@@ -1,3 +1,4 @@
+#![cfg(feature = "static")]
 use std::{
     fmt::Write,
     sync::atomic::{AtomicBool, Ordering},