diff --git a/bbqtest/src/benches.rs b/bbqtest/src/benches.rs index 181a3c3..e6a4a7f 100644 --- a/bbqtest/src/benches.rs +++ b/bbqtest/src/benches.rs @@ -1,4 +1,4 @@ -use bbqueue::BBBuffer; +use bbqueue::BBQueue; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use std::cmp::min; @@ -17,7 +17,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { c.bench_function("bbq 2048/4096", |bench| bench.iter(|| chunky(&data, 2048))); - let buffy: BBBuffer = BBBuffer::new(); + let buffy: BBQueue = BBQueue::new(); let (mut prod, mut cons) = buffy.try_split().unwrap(); c.bench_function("bbq 8192/65536", |bench| { @@ -196,7 +196,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { use crossbeam_utils::thread; fn chunky(data: &[u8], chunksz: usize) { - let buffy: BBBuffer = BBBuffer::new(); + let buffy: BBQueue = BBQueue::new(); let (mut prod, mut cons) = buffy.try_split().unwrap(); thread::scope(|sc| { diff --git a/bbqtest/src/framed.rs b/bbqtest/src/framed.rs index 10ea8fa..810dd08 100644 --- a/bbqtest/src/framed.rs +++ b/bbqtest/src/framed.rs @@ -1,10 +1,10 @@ #[cfg(test)] mod tests { - use bbqueue::BBBuffer; + use bbqueue::BBQueue; #[test] fn frame_wrong_size() { - let bb: BBBuffer<256> = BBBuffer::new(); + let bb: BBQueue<256> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); // Create largeish grants @@ -25,7 +25,7 @@ mod tests { #[test] fn full_size() { - let bb: BBBuffer<256> = BBBuffer::new(); + let bb: BBQueue<256> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); let mut ctr = 0; @@ -66,7 +66,7 @@ mod tests { #[test] fn frame_overcommit() { - let bb: BBBuffer<256> = BBBuffer::new(); + let bb: BBQueue<256> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); // Create largeish grants @@ -93,7 +93,7 @@ mod tests { #[test] fn frame_undercommit() { - let bb: BBBuffer<512> = BBBuffer::new(); + let bb: BBQueue<512> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); for _ in 0..100_000 { @@ -132,7 +132,7 @@ mod tests { #[test] fn frame_auto_commit_release() { - let bb: BBBuffer<256> = BBBuffer::new(); + let bb: BBQueue<256> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); for _ in 0..100 { diff --git a/bbqtest/src/lib.rs b/bbqtest/src/lib.rs index aff1c2b..c0f5b32 100644 --- a/bbqtest/src/lib.rs +++ b/bbqtest/src/lib.rs @@ -1,18 +1,19 @@ //! NOTE: this crate is really just a shim for testing //! the other no-std crate. -mod framed; -mod multi_thread; -mod ring_around_the_senders; -mod single_thread; +// mod framed; +// mod multi_thread; +// mod ring_around_the_senders; +// mod single_thread; #[cfg(test)] mod tests { - use bbqueue::{BBBuffer, Error as BBQError}; + use bbqueue::{BBQueue, Error as BBQError, OwnedBBBuffer as Obbb}; #[test] fn deref_deref_mut() { - let bb: BBBuffer<6> = BBBuffer::new(); + let bb: BBQueue> = BBQueue::new(Obbb::new()); + let (mut prod, mut cons) = bb.try_split().unwrap(); let mut wgr = prod.grant_exact(1).unwrap(); @@ -35,8 +36,8 @@ mod tests { #[test] fn static_allocator() { // Check we can make multiple static items... 
- static BBQ1: BBBuffer<6> = BBBuffer::new(); - static BBQ2: BBBuffer<6> = BBBuffer::new(); + static BBQ1: BBQueue> = BBQueue::new(Obbb::new()); + static BBQ2: BBQueue> = BBQueue::new(Obbb::new()); let (mut prod1, mut cons1) = BBQ1.try_split().unwrap(); let (mut _prod2, mut cons2) = BBQ2.try_split().unwrap(); @@ -53,346 +54,346 @@ mod tests { assert_eq!(&*rgr1, &[1, 2, 3]); } - #[test] - fn release() { - // Check we can make multiple static items... - static BBQ1: BBBuffer<6> = BBBuffer::new(); - static BBQ2: BBBuffer<6> = BBBuffer::new(); - let (prod1, cons1) = BBQ1.try_split().unwrap(); - let (prod2, cons2) = BBQ2.try_split().unwrap(); - - // We cannot release with the wrong prod/cons - let (prod2, cons2) = BBQ1.try_release(prod2, cons2).unwrap_err(); - let (prod1, cons1) = BBQ2.try_release(prod1, cons1).unwrap_err(); - - // We cannot release with the wrong consumer... - let (prod1, cons2) = BBQ1.try_release(prod1, cons2).unwrap_err(); - - // ...or the wrong producer - let (prod2, cons1) = BBQ1.try_release(prod2, cons1).unwrap_err(); - - // We cannot release with a write grant in progress - let mut prod1 = prod1; - let wgr1 = prod1.grant_exact(3).unwrap(); - let (prod1, mut cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); - - // We cannot release with a read grant in progress - wgr1.commit(3); - let rgr1 = cons1.read().unwrap(); - let (prod1, cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); - - // But we can when everything is resolved - rgr1.release(3); - assert!(BBQ1.try_release(prod1, cons1).is_ok()); - assert!(BBQ2.try_release(prod2, cons2).is_ok()); - - // And we can re-split on-demand - let _ = BBQ1.try_split().unwrap(); - let _ = BBQ2.try_split().unwrap(); - } - - #[test] - fn direct_usage_sanity() { - // Initialize - let bb: BBBuffer<6> = BBBuffer::new(); - let (mut prod, mut cons) = bb.try_split().unwrap(); - assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); - - // Initial grant, shouldn't roll over - let mut x = prod.grant_exact(4).unwrap(); - - // Still no data available yet - assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); - - // Add full data from grant - x.copy_from_slice(&[1, 2, 3, 4]); - - // Still no data available yet - assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); - - // Commit data - x.commit(4); - - ::std::sync::atomic::fence(std::sync::atomic::Ordering::SeqCst); - - let a = cons.read().unwrap(); - assert_eq!(&*a, &[1, 2, 3, 4]); - - // Release the first two bytes - a.release(2); - - let r = cons.read().unwrap(); - assert_eq!(&*r, &[3, 4]); - r.release(0); + // #[test] + // fn release() { + // // Check we can make multiple static items... + // static BBQ1: BBQueue<6> = BBQueue::new(); + // static BBQ2: BBQueue<6> = BBQueue::new(); + // let (prod1, cons1) = BBQ1.try_split().unwrap(); + // let (prod2, cons2) = BBQ2.try_split().unwrap(); - // Grant two more - let mut x = prod.grant_exact(2).unwrap(); - let r = cons.read().unwrap(); - assert_eq!(&*r, &[3, 4]); - r.release(0); + // // We cannot release with the wrong prod/cons + // let (prod2, cons2) = BBQ1.try_release(prod2, cons2).unwrap_err(); + // let (prod1, cons1) = BBQ2.try_release(prod1, cons1).unwrap_err(); - // Add more data - x.copy_from_slice(&[11, 12]); - let r = cons.read().unwrap(); - assert_eq!(&*r, &[3, 4]); - r.release(0); + // // We cannot release with the wrong consumer... 
+ // let (prod1, cons2) = BBQ1.try_release(prod1, cons2).unwrap_err(); - // Commit - x.commit(2); + // // ...or the wrong producer + // let (prod2, cons1) = BBQ1.try_release(prod2, cons1).unwrap_err(); - let a = cons.read().unwrap(); - assert_eq!(&*a, &[3, 4, 11, 12]); + // // We cannot release with a write grant in progress + // let mut prod1 = prod1; + // let wgr1 = prod1.grant_exact(3).unwrap(); + // let (prod1, mut cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); - a.release(2); - let r = cons.read().unwrap(); - assert_eq!(&*r, &[11, 12]); - r.release(0); + // // We cannot release with a read grant in progress + // wgr1.commit(3); + // let rgr1 = cons1.read().unwrap(); + // let (prod1, cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); - let mut x = prod.grant_exact(3).unwrap(); - let r = cons.read().unwrap(); - assert_eq!(&*r, &[11, 12]); - r.release(0); + // // But we can when everything is resolved + // rgr1.release(3); + // assert!(BBQ1.try_release(prod1, cons1).is_ok()); + // assert!(BBQ2.try_release(prod2, cons2).is_ok()); - x.copy_from_slice(&[21, 22, 23]); + // // And we can re-split on-demand + // let _ = BBQ1.try_split().unwrap(); + // let _ = BBQ2.try_split().unwrap(); + // } - let r = cons.read().unwrap(); - assert_eq!(&*r, &[11, 12]); - r.release(0); - x.commit(3); + // #[test] + // fn direct_usage_sanity() { + // // Initialize + // let bb: BBQueue<6> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split().unwrap(); + // assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); - let a = cons.read().unwrap(); - - // NOTE: The data we just added isn't available yet, - // since it has wrapped around - assert_eq!(&*a, &[11, 12]); - - a.release(2); - - // And now we can see it - let r = cons.read().unwrap(); - assert_eq!(&*r, &[21, 22, 23]); - r.release(0); - - // Ask for something way too big - assert!(prod.grant_exact(10).is_err()); - } - - #[test] - fn zero_sized_grant() { - let bb: BBBuffer<1000> = BBBuffer::new(); - let (mut prod, mut _cons) = bb.try_split().unwrap(); - - let size = 1000; - let grant = prod.grant_exact(size).unwrap(); - grant.commit(size); - - let grant = prod.grant_exact(0).unwrap(); - grant.commit(0); - } - - #[test] - fn frame_sanity() { - let bb: BBBuffer<1000> = BBBuffer::new(); - let (mut prod, mut cons) = bb.try_split_framed().unwrap(); - - // One frame in, one frame out - let mut wgrant = prod.grant(128).unwrap(); - assert_eq!(wgrant.len(), 128); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - wgrant.commit(128); - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 128); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - - // Three frames in, three frames out - let mut state = 0; - let states = [16usize, 32, 24]; - - for step in &states { - let mut wgrant = prod.grant(*step).unwrap(); - assert_eq!(wgrant.len(), *step); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = (idx + state) as u8; - } - wgrant.commit(*step); - state += *step; - } - - state = 0; - - for step in &states { - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), *step); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, (idx + state) as u8); - } - rgrant.release(); - state += *step; - } - } - - #[test] - fn frame_wrap() { - let bb: BBBuffer<22> = BBBuffer::new(); - let (mut prod, mut cons) = bb.try_split_framed().unwrap(); - - // 10 + 1 used - let mut wgrant = prod.grant(10).unwrap(); - assert_eq!(wgrant.len(), 10); - for (idx, i) in 
wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - wgrant.commit(10); - // 1 frame in queue - - // 20 + 2 used (assuming u64 test platform) - let mut wgrant = prod.grant(10).unwrap(); - assert_eq!(wgrant.len(), 10); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - wgrant.commit(10); - // 2 frames in queue - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 10); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - // 1 frame in queue - - // No more room! - assert!(prod.grant(10).is_err()); - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 10); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - // 0 frames in queue - - // 10 + 1 used (assuming u64 test platform) - let mut wgrant = prod.grant(10).unwrap(); - assert_eq!(wgrant.len(), 10); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - wgrant.commit(10); - // 1 frame in queue - - // No more room! - assert!(prod.grant(10).is_err()); - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 10); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - // 0 frames in queue - - // No more frames! - assert!(cons.read().is_none()); - } - - #[test] - fn frame_big_little() { - let bb: BBBuffer<65536> = BBBuffer::new(); - let (mut prod, mut cons) = bb.try_split_framed().unwrap(); - - // Create a frame that should take 3 bytes for the header - assert!(prod.grant(65534).is_err()); - - let mut wgrant = prod.grant(65533).unwrap(); - assert_eq!(wgrant.len(), 65533); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - // Only commit 127 bytes, which fit into a header of 1 byte - wgrant.commit(127); - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 127); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - } - - #[test] - fn split_sanity_check() { - let bb: BBBuffer<10> = BBBuffer::new(); - let (mut prod, mut cons) = bb.try_split().unwrap(); - - // Fill buffer - let mut wgrant = prod.grant_exact(10).unwrap(); - assert_eq!(wgrant.len(), 10); - wgrant.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); - wgrant.commit(10); - - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 10); - assert_eq!( - rgrant.bufs(), - (&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10][..], &[][..]) - ); - // Release part of the buffer - rgrant.release(6); - - // Almost fill buffer again => | 11 | 12 | 13 | 14 | 15 | x | 7 | 8 | 9 | 10 | - let mut wgrant = prod.grant_exact(5).unwrap(); - assert_eq!(wgrant.len(), 5); - wgrant.copy_from_slice(&[11, 12, 13, 14, 15]); - wgrant.commit(5); - - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 9); - assert_eq!( - rgrant.bufs(), - (&[7, 8, 9, 10][..], &[11, 12, 13, 14, 15][..]) - ); - - // Release part of the buffer => | x | x | x | 14 | 15 | x | x | x | x | x | - rgrant.release(7); - - // Check that it is not possible to claim more space than what should be available - assert!(prod.grant_exact(6).is_err()); - - // Fill buffer to the end => | x | x | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | - let mut wgrant = prod.grant_exact(5).unwrap(); - wgrant.copy_from_slice(&[21, 22, 23, 24, 25]); - wgrant.commit(5); - - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 7); - assert_eq!(rgrant.bufs(), (&[14, 15, 21, 22, 23, 24, 25][..], &[][..])); - rgrant.release(0); - - // 
Fill buffer to the end => | 26 | 27 | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | - let mut wgrant = prod.grant_exact(2).unwrap(); - wgrant.copy_from_slice(&[26, 27]); - wgrant.commit(2); - - // Fill buffer to the end => | x | 27 | x | x | x | x | x | x | x | x | - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 9); - assert_eq!( - rgrant.bufs(), - (&[14, 15, 21, 22, 23, 24, 25][..], &[26, 27][..]) - ); - rgrant.release(8); - - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 1); - assert_eq!(rgrant.bufs(), (&[27][..], &[][..])); - rgrant.release(1); - } + // // Initial grant, shouldn't roll over + // let mut x = prod.grant_exact(4).unwrap(); + + // // Still no data available yet + // assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); + + // // Add full data from grant + // x.copy_from_slice(&[1, 2, 3, 4]); + + // // Still no data available yet + // assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); + + // // Commit data + // x.commit(4); + + // ::std::sync::atomic::fence(std::sync::atomic::Ordering::SeqCst); + + // let a = cons.read().unwrap(); + // assert_eq!(&*a, &[1, 2, 3, 4]); + + // // Release the first two bytes + // a.release(2); + + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[3, 4]); + // r.release(0); + + // // Grant two more + // let mut x = prod.grant_exact(2).unwrap(); + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[3, 4]); + // r.release(0); + + // // Add more data + // x.copy_from_slice(&[11, 12]); + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[3, 4]); + // r.release(0); + + // // Commit + // x.commit(2); + + // let a = cons.read().unwrap(); + // assert_eq!(&*a, &[3, 4, 11, 12]); + + // a.release(2); + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[11, 12]); + // r.release(0); + + // let mut x = prod.grant_exact(3).unwrap(); + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[11, 12]); + // r.release(0); + + // x.copy_from_slice(&[21, 22, 23]); + + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[11, 12]); + // r.release(0); + // x.commit(3); + + // let a = cons.read().unwrap(); + + // // NOTE: The data we just added isn't available yet, + // // since it has wrapped around + // assert_eq!(&*a, &[11, 12]); + + // a.release(2); + + // // And now we can see it + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[21, 22, 23]); + // r.release(0); + + // // Ask for something way too big + // assert!(prod.grant_exact(10).is_err()); + // } + + // #[test] + // fn zero_sized_grant() { + // let bb: BBQueue<1000> = BBQueue::new(); + // let (mut prod, mut _cons) = bb.try_split().unwrap(); + + // let size = 1000; + // let grant = prod.grant_exact(size).unwrap(); + // grant.commit(size); + + // let grant = prod.grant_exact(0).unwrap(); + // grant.commit(0); + // } + + // #[test] + // fn frame_sanity() { + // let bb: BBQueue<1000> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split_framed().unwrap(); + + // // One frame in, one frame out + // let mut wgrant = prod.grant(128).unwrap(); + // assert_eq!(wgrant.len(), 128); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // wgrant.commit(128); + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 128); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + + // // Three frames in, three frames out + // let mut state = 0; + // let states = [16usize, 32, 24]; + + // for step in &states { + // let mut 
wgrant = prod.grant(*step).unwrap(); + // assert_eq!(wgrant.len(), *step); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = (idx + state) as u8; + // } + // wgrant.commit(*step); + // state += *step; + // } + + // state = 0; + + // for step in &states { + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), *step); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, (idx + state) as u8); + // } + // rgrant.release(); + // state += *step; + // } + // } + + // #[test] + // fn frame_wrap() { + // let bb: BBQueue<22> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split_framed().unwrap(); + + // // 10 + 1 used + // let mut wgrant = prod.grant(10).unwrap(); + // assert_eq!(wgrant.len(), 10); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // wgrant.commit(10); + // // 1 frame in queue + + // // 20 + 2 used (assuming u64 test platform) + // let mut wgrant = prod.grant(10).unwrap(); + // assert_eq!(wgrant.len(), 10); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // wgrant.commit(10); + // // 2 frames in queue + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 10); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + // // 1 frame in queue + + // // No more room! + // assert!(prod.grant(10).is_err()); + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 10); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + // // 0 frames in queue + + // // 10 + 1 used (assuming u64 test platform) + // let mut wgrant = prod.grant(10).unwrap(); + // assert_eq!(wgrant.len(), 10); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // wgrant.commit(10); + // // 1 frame in queue + + // // No more room! + // assert!(prod.grant(10).is_err()); + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 10); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + // // 0 frames in queue + + // // No more frames! 
+ // assert!(cons.read().is_none()); + // } + + // #[test] + // fn frame_big_little() { + // let bb: BBQueue<65536> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split_framed().unwrap(); + + // // Create a frame that should take 3 bytes for the header + // assert!(prod.grant(65534).is_err()); + + // let mut wgrant = prod.grant(65533).unwrap(); + // assert_eq!(wgrant.len(), 65533); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // // Only commit 127 bytes, which fit into a header of 1 byte + // wgrant.commit(127); + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 127); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + // } + + // #[test] + // fn split_sanity_check() { + // let bb: BBQueue<10> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split().unwrap(); + + // // Fill buffer + // let mut wgrant = prod.grant_exact(10).unwrap(); + // assert_eq!(wgrant.len(), 10); + // wgrant.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + // wgrant.commit(10); + + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 10); + // assert_eq!( + // rgrant.bufs(), + // (&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10][..], &[][..]) + // ); + // // Release part of the buffer + // rgrant.release(6); + + // // Almost fill buffer again => | 11 | 12 | 13 | 14 | 15 | x | 7 | 8 | 9 | 10 | + // let mut wgrant = prod.grant_exact(5).unwrap(); + // assert_eq!(wgrant.len(), 5); + // wgrant.copy_from_slice(&[11, 12, 13, 14, 15]); + // wgrant.commit(5); + + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 9); + // assert_eq!( + // rgrant.bufs(), + // (&[7, 8, 9, 10][..], &[11, 12, 13, 14, 15][..]) + // ); + + // // Release part of the buffer => | x | x | x | 14 | 15 | x | x | x | x | x | + // rgrant.release(7); + + // // Check that it is not possible to claim more space than what should be available + // assert!(prod.grant_exact(6).is_err()); + + // // Fill buffer to the end => | x | x | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | + // let mut wgrant = prod.grant_exact(5).unwrap(); + // wgrant.copy_from_slice(&[21, 22, 23, 24, 25]); + // wgrant.commit(5); + + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 7); + // assert_eq!(rgrant.bufs(), (&[14, 15, 21, 22, 23, 24, 25][..], &[][..])); + // rgrant.release(0); + + // // Fill buffer to the end => | 26 | 27 | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | + // let mut wgrant = prod.grant_exact(2).unwrap(); + // wgrant.copy_from_slice(&[26, 27]); + // wgrant.commit(2); + + // // Fill buffer to the end => | x | 27 | x | x | x | x | x | x | x | x | + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 9); + // assert_eq!( + // rgrant.bufs(), + // (&[14, 15, 21, 22, 23, 24, 25][..], &[26, 27][..]) + // ); + // rgrant.release(8); + + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 1); + // assert_eq!(rgrant.bufs(), (&[27][..], &[][..])); + // rgrant.release(1); + // } } diff --git a/bbqtest/src/multi_thread.rs b/bbqtest/src/multi_thread.rs index 33610a8..cec8a9b 100644 --- a/bbqtest/src/multi_thread.rs +++ b/bbqtest/src/multi_thread.rs @@ -1,7 +1,7 @@ #[cfg_attr(not(feature = "verbose"), allow(unused_variables))] #[cfg(test)] mod tests { - use bbqueue::{BBBuffer, Error}; + use bbqueue::{BBQueue, Error}; use rand::prelude::*; use std::thread::spawn; use std::time::{Duration, Instant}; @@ -48,7 +48,7 @@ mod 
tests { #[cfg(feature = "verbose")] println!("RTX: Running test..."); - static BB: BBBuffer = BBBuffer::new(); + static BB: BBQueue = BBQueue::new(); let (mut tx, mut rx) = BB.try_split().unwrap(); let mut last_tx = Instant::now(); @@ -140,7 +140,7 @@ mod tests { #[test] fn sanity_check() { - static BB: BBBuffer = BBBuffer::new(); + static BB: BBQueue = BBQueue::new(); let (mut tx, mut rx) = BB.try_split().unwrap(); let mut last_tx = Instant::now(); @@ -234,7 +234,7 @@ mod tests { #[test] fn sanity_check_grant_max() { - static BB: BBBuffer = BBBuffer::new(); + static BB: BBQueue = BBQueue::new(); let (mut tx, mut rx) = BB.try_split().unwrap(); #[cfg(feature = "verbose")] diff --git a/bbqtest/src/ring_around_the_senders.rs b/bbqtest/src/ring_around_the_senders.rs index c544609..0cc95fc 100644 --- a/bbqtest/src/ring_around_the_senders.rs +++ b/bbqtest/src/ring_around_the_senders.rs @@ -1,7 +1,7 @@ #[cfg(test)] mod tests { - use bbqueue::{BBBuffer, Consumer, GrantR, GrantW, Producer}; + use bbqueue::{BBQueue, Consumer, GrantR, GrantW, Producer}; enum Potato<'a, const N: usize> { Tx((Producer<'a, N>, u8)), @@ -76,7 +76,7 @@ mod tests { } } - static BB: BBBuffer = BBBuffer::new(); + static BB: BBQueue = BBQueue::new(); use std::sync::mpsc::{channel, Receiver, Sender}; use std::thread::spawn; diff --git a/bbqtest/src/single_thread.rs b/bbqtest/src/single_thread.rs index d4ce82c..1fdc96f 100644 --- a/bbqtest/src/single_thread.rs +++ b/bbqtest/src/single_thread.rs @@ -1,10 +1,10 @@ #[cfg(test)] mod tests { - use bbqueue::BBBuffer; + use bbqueue::BBQueue; #[test] fn sanity_check() { - let bb: BBBuffer<6> = BBBuffer::new(); + let bb: BBQueue<6> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split().unwrap(); const ITERS: usize = 100000; diff --git a/core/Cargo.toml b/core/Cargo.toml index 56424a1..43d5976 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -19,6 +19,7 @@ cortex-m = { version = "0.6.0", optional = true } [features] thumbv6 = ["cortex-m"] +alloc = [] [package.metadata.docs.rs] all-features = true diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index e2c5c8a..1446901 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -6,7 +6,7 @@ use core::{ cell::UnsafeCell, cmp::min, marker::PhantomData, - mem::{forget, transmute, MaybeUninit}, + mem::forget, ops::{Deref, DerefMut}, ptr::NonNull, result::Result as CoreResult, @@ -17,11 +17,15 @@ use core::{ }, }; -/// A backing structure for a BBQueue. Can be used to create either -/// a BBQueue or a split Producer/Consumer pair -pub struct BBBuffer { - buf: UnsafeCell>, +pub struct OwnedBBBuffer { + hdr: BBHeader, + storage: UnsafeCell<[u8; N]> +} + +unsafe impl Sync for OwnedBBBuffer<{ A }> {} + +pub struct BBHeader { /// Where the next byte will be written write: AtomicUsize, @@ -51,10 +55,82 @@ pub struct BBBuffer { already_split: AtomicBool, } -unsafe impl Sync for BBBuffer<{ A }> {} +// TODO(AJM): Seal this trait? Unsafe to impl? +// Do I ever need the header XOR the storage? Or should +// they always just travel together? +// -> Yes, they do, but it's probably not worth separating them, instead just handing +// back some combined type. They will probably always be "allocated" together. +// +// Maybe the BBGetter trait can be replaced with AsRef or something? 
+// Also, this would probably let anyone get access to the header of bbqueue, which +// would be wildly unsafe +pub(crate) mod sealed { + use crate::bbbuffer::BBHeader; + use crate::bbbuffer::OwnedBBBuffer; + + pub trait BBGetter<'a> { + type Duplicate: BBGetter<'a>; + + fn get_header(&self) -> &BBHeader; + fn get_storage(&self) -> (*mut u8, usize); + fn duplicate(&self) -> Self::Duplicate; + } + + impl<'a, const N: usize> BBGetter<'a> for OwnedBBBuffer { + type Duplicate = &'a OwnedBBBuffer; + + fn get_header(&self) -> &BBHeader { + &self.hdr + } + + fn get_storage(&self) -> (*mut u8, usize) { + let ptr = self.storage.get().cast(); + (ptr, N) + } + + fn duplicate(&self) -> Self::Duplicate { + todo!() + } + } + + impl<'a, const N: usize> BBGetter<'a> for &'a OwnedBBBuffer { + type Duplicate = &'a OwnedBBBuffer; + + fn get_header(&self) -> &BBHeader { + &self.hdr + } + + fn get_storage(&self) -> (*mut u8, usize) { + let ptr = self.storage.get().cast(); + (ptr, N) + } + + fn duplicate(&self) -> Self::Duplicate { + todo!() + } + } +} +use crate::sealed::BBGetter; -impl<'a, const N: usize> BBBuffer<{ N }> { - /// Attempt to split the `BBBuffer` into `Consumer` and `Producer` halves to gain access to the +/// A backing structure for a BBQueue. Can be used to create either +/// a BBQueue or a split Producer/Consumer pair +// +// NOTE: The BBQueue is generic over ANY type, +pub struct BBQueue +{ + sto: STO +} + +impl<'a, STO> BBQueue { + pub const fn new(storage: STO) -> Self { + Self { + sto: storage, + } + } +} + +impl<'a, STO: BBGetter<'a>> BBQueue { + /// Attempt to split the `BBQueue` into `Consumer` and `Producer` halves to gain access to the /// buffer. If buffer has already been split, an error will be returned. /// /// NOTE: When splitting, the underlying buffer will be explicitly initialized @@ -69,10 +145,10 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (prod, cons) = buffer.try_split().unwrap(); /// /// // Not possible to split twice @@ -85,35 +161,24 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// # bbqtest(); /// # } /// ``` - pub fn try_split(&'a self) -> Result<(Producer<'a, { N }>, Consumer<'a, { N }>)> { - if atomic::swap(&self.already_split, true, AcqRel) { + pub fn try_split(&'a self) -> Result<(Producer<'a, STO::Duplicate>, Consumer<'a, STO::Duplicate>)> { + if atomic::swap(&self.sto.get_header().already_split, true, AcqRel) { return Err(Error::AlreadySplit); } - unsafe { - // Explicitly zero the data to avoid undefined behavior. 
- // This is required, because we hand out references to the buffers, - // which mean that creating them as references is technically UB for now - let mu_ptr = self.buf.get(); - (*mu_ptr).as_mut_ptr().write_bytes(0u8, 1); - - let nn1 = NonNull::new_unchecked(self as *const _ as *mut _); - let nn2 = NonNull::new_unchecked(self as *const _ as *mut _); - - Ok(( - Producer { - bbq: nn1, - pd: PhantomData, - }, - Consumer { - bbq: nn2, - pd: PhantomData, - }, - )) - } + Ok(( + Producer { + bbq: self.sto.duplicate(), + pd: PhantomData, + }, + Consumer { + bbq: self.sto.duplicate(), + pd: PhantomData, + }, + )) } - /// Attempt to split the `BBBuffer` into `FrameConsumer` and `FrameProducer` halves + /// Attempt to split the `BBQueue` into `FrameConsumer` and `FrameProducer` halves /// to gain access to the buffer. If buffer has already been split, an error /// will be returned. /// @@ -127,7 +192,7 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// section while splitting. pub fn try_split_framed( &'a self, - ) -> Result<(FrameProducer<'a, { N }>, FrameConsumer<'a, { N }>)> { + ) -> Result<(FrameProducer<'a, STO::Duplicate>, FrameConsumer<'a, STO::Duplicate>)> { let (producer, consumer) = self.try_split()?; Ok((FrameProducer { producer }, FrameConsumer { consumer })) } @@ -137,16 +202,16 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// This re-initializes the buffer so it may be split in a different mode at a later /// time. There must be no read or write grants active, or an error will be returned. /// - /// The `Producer` and `Consumer` must be from THIS `BBBuffer`, or an error will + /// The `Producer` and `Consumer` must be from THIS `BBQueue`, or an error will /// be returned. /// /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (prod, cons) = buffer.try_split().unwrap(); /// /// // Not possible to split twice @@ -167,24 +232,27 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// ``` pub fn try_release( &'a self, - prod: Producer<'a, { N }>, - cons: Consumer<'a, { N }>, - ) -> CoreResult<(), (Producer<'a, { N }>, Consumer<'a, { N }>)> { + prod: Producer<'a, STO>, + cons: Consumer<'a, STO>, + ) -> CoreResult<(), (Producer<'a, STO>, Consumer<'a, STO>)> { // Note: Re-entrancy is not possible because we require ownership // of the producer and consumer, which are not cloneable. We also // can assume the buffer has been split, because // Are these our producers and consumers? 
- let our_prod = prod.bbq.as_ptr() as *const Self == self; - let our_cons = cons.bbq.as_ptr() as *const Self == self; + // NOTE: Check the header rather than the data + let our_prod = prod.bbq.get_header() as *const BBHeader == self.sto.get_header() as *const BBHeader; + let our_cons = cons.bbq.get_header() as *const BBHeader == self.sto.get_header() as *const BBHeader; if !(our_prod && our_cons) { // Can't release, not our producer and consumer return Err((prod, cons)); } - let wr_in_progress = self.write_in_progress.load(Acquire); - let rd_in_progress = self.read_in_progress.load(Acquire); + let hdr = self.sto.get_header(); + + let wr_in_progress = hdr.write_in_progress.load(Acquire); + let rd_in_progress = hdr.read_in_progress.load(Acquire); if wr_in_progress || rd_in_progress { // Can't release, active grant(s) in progress @@ -196,13 +264,13 @@ impl<'a, const N: usize> BBBuffer<{ N }> { drop(cons); // Re-initialize the buffer (not totally needed, but nice to do) - self.write.store(0, Release); - self.read.store(0, Release); - self.reserve.store(0, Release); - self.last.store(0, Release); + hdr.write.store(0, Release); + hdr.read.store(0, Release); + hdr.reserve.store(0, Release); + hdr.last.store(0, Release); // Mark the buffer as ready to be split - self.already_split.store(false, Release); + hdr.already_split.store(false, Release); Ok(()) } @@ -212,13 +280,13 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// This re-initializes the buffer so it may be split in a different mode at a later /// time. There must be no read or write grants active, or an error will be returned. /// - /// The `FrameProducer` and `FrameConsumer` must be from THIS `BBBuffer`, or an error + /// The `FrameProducer` and `FrameConsumer` must be from THIS `BBQueue`, or an error /// will be returned. pub fn try_release_framed( &'a self, - prod: FrameProducer<'a, { N }>, - cons: FrameConsumer<'a, { N }>, - ) -> CoreResult<(), (FrameProducer<'a, { N }>, FrameConsumer<'a, { N }>)> { + prod: FrameProducer<'a, STO>, + cons: FrameConsumer<'a, STO>, + ) -> CoreResult<(), (FrameProducer<'a, STO>, FrameConsumer<'a, STO>)> { self.try_release(prod.producer, cons.consumer) .map_err(|(producer, consumer)| { // Restore the wrapper types @@ -227,18 +295,18 @@ impl<'a, const N: usize> BBBuffer<{ N }> { } } -impl BBBuffer<{ A }> { - /// Create a new constant inner portion of a `BBBuffer`. +impl OwnedBBBuffer<{ N }> { + /// Create a new constant inner portion of a `BBQueue`. /// - /// NOTE: This is only necessary to use when creating a `BBBuffer` at static + /// NOTE: This is only necessary to use when creating a `BBQueue` at static /// scope, and is generally never used directly. This process is necessary to /// work around current limitations in `const fn`, and will be replaced in /// the future. 
/// /// ```rust,no_run - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// - /// static BUF: BBBuffer<6> = BBBuffer::new(); + /// static BUF: BBQueue<6> = BBQueue::new(); /// /// fn main() { /// let (prod, cons) = BUF.try_split().unwrap(); @@ -247,43 +315,45 @@ impl BBBuffer<{ A }> { pub const fn new() -> Self { Self { // This will not be initialized until we split the buffer - buf: UnsafeCell::new(MaybeUninit::uninit()), - - /// Owned by the writer - write: AtomicUsize::new(0), - - /// Owned by the reader - read: AtomicUsize::new(0), - - /// Cooperatively owned - /// - /// NOTE: This should generally be initialized as size_of::(), however - /// this would prevent the structure from being entirely zero-initialized, - /// and can cause the .data section to be much larger than necessary. By - /// forcing the `last` pointer to be zero initially, we place the structure - /// in an "inverted" condition, which will be resolved on the first commited - /// bytes that are written to the structure. - /// - /// When read == last == write, no bytes will be allowed to be read (good), but - /// write grants can be given out (also good). - last: AtomicUsize::new(0), - - /// Owned by the Writer, "private" - reserve: AtomicUsize::new(0), - - /// Owned by the Reader, "private" - read_in_progress: AtomicBool::new(false), - - /// Owned by the Writer, "private" - write_in_progress: AtomicBool::new(false), - - /// We haven't split at the start - already_split: AtomicBool::new(false), + storage: UnsafeCell::new([0u8; N]), + + hdr: BBHeader { + /// Owned by the writer + write: AtomicUsize::new(0), + + /// Owned by the reader + read: AtomicUsize::new(0), + + /// Cooperatively owned + /// + /// NOTE: This should generally be initialized as size_of::(), however + /// this would prevent the structure from being entirely zero-initialized, + /// and can cause the .data section to be much larger than necessary. By + /// forcing the `last` pointer to be zero initially, we place the structure + /// in an "inverted" condition, which will be resolved on the first commited + /// bytes that are written to the structure. + /// + /// When read == last == write, no bytes will be allowed to be read (good), but + /// write grants can be given out (also good). + last: AtomicUsize::new(0), + + /// Owned by the Writer, "private" + reserve: AtomicUsize::new(0), + + /// Owned by the Reader, "private" + read_in_progress: AtomicBool::new(false), + + /// Owned by the Writer, "private" + write_in_progress: AtomicBool::new(false), + + /// We haven't split at the start + already_split: AtomicBool::new(false), + } } } } -/// `Producer` is the primary interface for pushing data into a `BBBuffer`. +/// `Producer` is the primary interface for pushing data into a `BBQueue`. /// There are various methods for obtaining a grant to write to the buffer, with /// different potential tradeoffs. As all grants are required to be a contiguous /// range of data, different strategies are sometimes useful when making the decision @@ -307,14 +377,24 @@ impl BBBuffer<{ A }> { /// /// See [this github issue](https://github.com/jamesmunns/bbqueue/issues/38) for a /// discussion of grant methods that could be added in the future. -pub struct Producer<'a, const N: usize> { - bbq: NonNull>, +pub struct Producer<'a, STO> +where + STO: BBGetter<'a>, +{ + // TODO: Is 'a the right lifetime? 
+ bbq: STO, pd: PhantomData<&'a ()>, } -unsafe impl<'a, const N: usize> Send for Producer<'a, { N }> {} +unsafe impl<'a, STO> Send for Producer<'a, STO> +where + STO: BBGetter<'a>, +{} -impl<'a, const N: usize> Producer<'a, { N }> { +impl<'a, STO> Producer<'a, STO> +where + STO: BBGetter<'a>, +{ /// Request a writable, contiguous section of memory of exactly /// `sz` bytes. If the buffer size requested is not available, /// an error will be returned. @@ -326,10 +406,10 @@ impl<'a, const N: usize> Producer<'a, { N }> { /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -347,18 +427,20 @@ impl<'a, const N: usize> Producer<'a, { N }> { /// # bbqtest(); /// # } /// ``` - pub fn grant_exact(&mut self, sz: usize) -> Result> { - let inner = unsafe { &self.bbq.as_ref() }; + pub fn grant_exact(&mut self, sz: usize) -> Result> { + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); + - if atomic::swap(&inner.write_in_progress, true, AcqRel) { + if atomic::swap(&hdr.write_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } // Writer component. Must never write to `read`, // be careful writing to `load` - let write = inner.write.load(Acquire); - let read = inner.read.load(Acquire); - let max = N; + let write = hdr.write.load(Acquire); + let read = hdr.read.load(Acquire); + let max = sto.1; let already_inverted = write < read; let start = if already_inverted { @@ -367,7 +449,7 @@ impl<'a, const N: usize> Producer<'a, { N }> { write } else { // Inverted, no room is available - inner.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } else { @@ -385,25 +467,26 @@ impl<'a, const N: usize> Producer<'a, { N }> { 0 } else { // Not invertible, no space - inner.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } }; // Safe write, only viewed by this task - inner.reserve.store(start + sz, Release); + hdr.reserve.store(start + sz, Release); - // This is sound, as UnsafeCell, MaybeUninit, and GenericArray - // are all `#[repr(Transparent)] - let start_of_buf_ptr = inner.buf.get().cast::(); + // This is sound, as UnsafeCell is `#[repr(Transparent)] + // Here we are casting a `*mut [u8; N]` to a `*mut u8` + let start_of_buf_ptr = sto.0; let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) }; Ok(GrantW { buf: grant_slice, - bbq: self.bbq, + bbq: self.bbq.duplicate(), to_commit: 0, + pd: PhantomData, }) } @@ -417,10 +500,10 @@ impl<'a, const N: usize> Producer<'a, { N }> { /// ``` /// # // bbqueue test shim! 
/// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -445,18 +528,19 @@ impl<'a, const N: usize> Producer<'a, { N }> { /// # bbqtest(); /// # } /// ``` - pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { - let inner = unsafe { &self.bbq.as_ref() }; + pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); - if atomic::swap(&inner.write_in_progress, true, AcqRel) { + if atomic::swap(&hdr.write_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } // Writer component. Must never write to `read`, // be careful writing to `load` - let write = inner.write.load(Acquire); - let read = inner.read.load(Acquire); - let max = N; + let write = hdr.write.load(Acquire); + let read = hdr.read.load(Acquire); + let max = sto.1; let already_inverted = write < read; @@ -469,7 +553,7 @@ impl<'a, const N: usize> Producer<'a, { N }> { write } else { // Inverted, no room is available - inner.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } else { @@ -488,38 +572,40 @@ impl<'a, const N: usize> Producer<'a, { N }> { 0 } else { // Not invertible, no space - inner.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } }; // Safe write, only viewed by this task - inner.reserve.store(start + sz, Release); + hdr.reserve.store(start + sz, Release); - // This is sound, as UnsafeCell, MaybeUninit, and GenericArray - // are all `#[repr(Transparent)] - let start_of_buf_ptr = inner.buf.get().cast::(); + // This is sound, as UnsafeCell is `#[repr(Transparent)] + // Here we are casting a `*mut [u8; N]` to a `*mut u8` + let start_of_buf_ptr = sto.0; let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) }; Ok(GrantW { buf: grant_slice, - bbq: self.bbq, + bbq: self.bbq.duplicate(), to_commit: 0, + pd: PhantomData, }) } } -/// `Consumer` is the primary interface for reading data from a `BBBuffer`. -pub struct Consumer<'a, const N: usize> { - bbq: NonNull>, +/// `Consumer` is the primary interface for reading data from a `BBQueue`. +pub struct Consumer<'a, STO: BBGetter<'a>> { + // TODO: Is 'a the right lifetime? + bbq: STO, pd: PhantomData<&'a ()>, } -unsafe impl<'a, const N: usize> Send for Consumer<'a, { N }> {} +unsafe impl<'a, STO: BBGetter<'a>> Send for Consumer<'a, STO> {} -impl<'a, const N: usize> Consumer<'a, { N }> { +impl<'a, STO: BBGetter<'a>> Consumer<'a, STO> { /// Obtains a contiguous slice of committed bytes. This slice may not /// contain ALL available bytes, if the writer has wrapped around. The /// remaining bytes will be available after all readable bytes are @@ -528,10 +614,10 @@ impl<'a, const N: usize> Consumer<'a, { N }> { /// ```rust /// # // bbqueue test shim! 
/// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -550,16 +636,17 @@ impl<'a, const N: usize> Consumer<'a, { N }> { /// # bbqtest(); /// # } /// ``` - pub fn read(&mut self) -> Result> { - let inner = unsafe { &self.bbq.as_ref() }; + pub fn read(&mut self) -> Result> { + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); - if atomic::swap(&inner.read_in_progress, true, AcqRel) { + if atomic::swap(&hdr.read_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } - let write = inner.write.load(Acquire); - let last = inner.last.load(Acquire); - let mut read = inner.read.load(Acquire); + let write = hdr.write.load(Acquire); + let last = hdr.last.load(Acquire); + let mut read = hdr.read.load(Acquire); // Resolve the inverted case or end of read if (read == last) && (write < read) { @@ -572,7 +659,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { // Commit does not check read, but if Grant has started an inversion, // grant could move Last to the prior write position // MOVING READ BACKWARDS! - inner.read.store(0, Release); + hdr.read.store(0, Release); } let sz = if write < read { @@ -584,34 +671,34 @@ impl<'a, const N: usize> Consumer<'a, { N }> { } - read; if sz == 0 { - inner.read_in_progress.store(false, Release); + hdr.read_in_progress.store(false, Release); return Err(Error::InsufficientSize); } - // This is sound, as UnsafeCell, MaybeUninit, and GenericArray - // are all `#[repr(Transparent)] - let start_of_buf_ptr = inner.buf.get().cast::(); + let start_of_buf_ptr = sto.0; let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz) }; Ok(GrantR { buf: grant_slice, - bbq: self.bbq, + bbq: self.bbq.duplicate(), to_release: 0, + pd: PhantomData, }) } /// Obtains two disjoint slices, which are each contiguous of committed bytes. /// Combined these contain all previously commited data. - pub fn split_read(&mut self) -> Result> { - let inner = unsafe { &self.bbq.as_ref() }; + pub fn split_read(&mut self) -> Result> { + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); - if atomic::swap(&inner.read_in_progress, true, AcqRel) { + if atomic::swap(&hdr.read_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } - let write = inner.write.load(Acquire); - let last = inner.last.load(Acquire); - let mut read = inner.read.load(Acquire); + let write = hdr.write.load(Acquire); + let last = hdr.last.load(Acquire); + let mut read = hdr.read.load(Acquire); // Resolve the inverted case or end of read if (read == last) && (write < read) { @@ -624,7 +711,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { // Commit does not check read, but if Grant has started an inversion, // grant could move Last to the prior write position // MOVING READ BACKWARDS! 
- inner.read.store(0, Release); + hdr.read.store(0, Release); } let (sz1, sz2) = if write < read { @@ -636,13 +723,13 @@ impl<'a, const N: usize> Consumer<'a, { N }> { }; if sz1 == 0 { - inner.read_in_progress.store(false, Release); + hdr.read_in_progress.store(false, Release); return Err(Error::InsufficientSize); } - // This is sound, as UnsafeCell, MaybeUninit, and GenericArray - // are all `#[repr(Transparent)] - let start_of_buf_ptr = inner.buf.get().cast::(); + // This is sound, as UnsafeCell is `#[repr(Transparent)] + // Here we are casting a `*mut [u8; N]` to a `*mut u8` + let start_of_buf_ptr = sto.0; let grant_slice1 = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz1) }; let grant_slice2 = unsafe { from_raw_parts_mut(start_of_buf_ptr, sz2) }; @@ -650,13 +737,14 @@ impl<'a, const N: usize> Consumer<'a, { N }> { Ok(SplitGrantR { buf1: grant_slice1, buf2: grant_slice2, - bbq: self.bbq, + bbq: self.bbq.duplicate(), to_release: 0, + pd: PhantomData, }) } } -impl BBBuffer<{ N }> { +impl<'a, STO: BBGetter<'a>> BBQueue { /// Returns the size of the backing storage. /// /// This is the maximum number of bytes that can be stored in this queue. @@ -664,10 +752,10 @@ impl BBBuffer<{ N }> { /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// assert_eq!(buffer.capacity(), 6); /// # // bbqueue test shim! /// # } @@ -678,7 +766,7 @@ impl BBBuffer<{ N }> { /// # } /// ``` pub fn capacity(&self) -> usize { - N + self.sto.get_storage().1 } } @@ -693,13 +781,18 @@ impl BBBuffer<{ N }> { /// If the `thumbv6` feature is selected, dropping the grant /// without committing it takes a short critical section, #[derive(Debug, PartialEq)] -pub struct GrantW<'a, const N: usize> { +pub struct GrantW<'a, STO> +where + STO: BBGetter<'a>, +{ pub(crate) buf: &'a mut [u8], - bbq: NonNull>, + // TODO: Is 'a the right lifetime? + bbq: STO, pub(crate) to_commit: usize, + pd: PhantomData<&'a ()>, } -unsafe impl<'a, const N: usize> Send for GrantW<'a, { N }> {} +unsafe impl<'a, STO: BBGetter<'a>> Send for GrantW<'a, STO> {} /// A structure representing a contiguous region of memory that /// may be read from, and potentially "released" (or cleared) @@ -714,28 +807,38 @@ unsafe impl<'a, const N: usize> Send for GrantW<'a, { N }> {} /// If the `thumbv6` feature is selected, dropping the grant /// without releasing it takes a short critical section, #[derive(Debug, PartialEq)] -pub struct GrantR<'a, const N: usize> { +pub struct GrantR<'a, STO> +where + STO: BBGetter<'a>, +{ pub(crate) buf: &'a mut [u8], - bbq: NonNull>, + // TODO: Is 'a the right lifetime? + bbq: STO, pub(crate) to_release: usize, + pd: PhantomData<&'a ()>, } /// A structure representing up to two contiguous regions of memory that /// may be read from, and potentially "released" (or cleared) /// from the queue #[derive(Debug, PartialEq)] -pub struct SplitGrantR<'a, const N: usize> { +pub struct SplitGrantR<'a, STO> +where + STO: BBGetter<'a>, +{ pub(crate) buf1: &'a mut [u8], pub(crate) buf2: &'a mut [u8], - bbq: NonNull>, + // TODO: Is 'a the right lifetime? 
+ bbq: STO, pub(crate) to_release: usize, + pd: PhantomData<&'a ()>, } -unsafe impl<'a, const N: usize> Send for GrantR<'a, { N }> {} +unsafe impl<'a, STO: BBGetter<'a>> Send for GrantR<'a, STO> {} -unsafe impl<'a, const N: usize> Send for SplitGrantR<'a, { N }> {} +unsafe impl<'a, STO: BBGetter<'a>> Send for SplitGrantR<'a, STO> {} -impl<'a, const N: usize> GrantW<'a, { N }> { +impl<'a, STO: BBGetter<'a>> GrantW<'a, STO> { /// Finalizes a writable grant given by `grant()` or `grant_max()`. /// This makes the data available to be read via `read()`. This consumes /// the grant. @@ -755,10 +858,10 @@ impl<'a, const N: usize> GrantW<'a, { N }> { /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -777,29 +880,15 @@ impl<'a, const N: usize> GrantW<'a, { N }> { self.buf } - /// Sometimes, it's not possible for the lifetimes to check out. For example, - /// if you need to hand this buffer to a function that expects to receive a - /// `&'static mut [u8]`, it is not possible for the inner reference to outlive the - /// grant itself. - /// - /// You MUST guarantee that in no cases, the reference that is returned here outlives - /// the grant itself. Once the grant has been released, referencing the data contained - /// WILL cause undefined behavior. - /// - /// Additionally, you must ensure that a separate reference to this data is not created - /// to this data, e.g. using `DerefMut` or the `buf()` method of this grant. - pub unsafe fn as_static_mut_buf(&mut self) -> &'static mut [u8] { - transmute::<&mut [u8], &'static mut [u8]>(self.buf) - } - #[inline(always)] pub(crate) fn commit_inner(&mut self, used: usize) { - let inner = unsafe { &self.bbq.as_ref() }; + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); // If there is no grant in progress, return early. This // generally means we are dropping the grant within a // wrapper structure - if !inner.write_in_progress.load(Acquire) { + if !hdr.write_in_progress.load(Acquire) { return; } @@ -810,17 +899,17 @@ impl<'a, const N: usize> GrantW<'a, { N }> { let len = self.buf.len(); let used = min(len, used); - let write = inner.write.load(Acquire); - atomic::fetch_sub(&inner.reserve, len - used, AcqRel); + let write = hdr.write.load(Acquire); + atomic::fetch_sub(&hdr.reserve, len - used, AcqRel); - let max = N; - let last = inner.last.load(Acquire); - let new_write = inner.reserve.load(Acquire); + let max = sto.1; + let last = hdr.last.load(Acquire); + let new_write = hdr.reserve.load(Acquire); if (new_write < write) && (write != max) { // We have already wrapped, but we are skipping some bytes at the end of the ring. // Mark `last` where the write pointer used to be to hold the line here - inner.last.store(write, Release); + hdr.last.store(write, Release); } else if new_write > last { // We're about to pass the last pointer, which was previously the artificial // end of the ring. 
Now that we've passed it, we can "unlock" the section @@ -829,7 +918,7 @@ impl<'a, const N: usize> GrantW<'a, { N }> { // Since new_write is strictly larger than last, it is safe to move this as // the other thread will still be halted by the (about to be updated) write // value - inner.last.store(max, Release); + hdr.last.store(max, Release); } // else: If new_write == last, either: // * last == max, so no need to write, OR @@ -839,10 +928,10 @@ impl<'a, const N: usize> GrantW<'a, { N }> { // Write must be updated AFTER last, otherwise read could think it was // time to invert early! - inner.write.store(new_write, Release); + hdr.write.store(new_write, Release); // Allow subsequent grants - inner.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); } /// Configures the amount of bytes to be commited on drop. @@ -851,7 +940,7 @@ impl<'a, const N: usize> GrantW<'a, { N }> { } } -impl<'a, const N: usize> GrantR<'a, { N }> { +impl<'a, STO: BBGetter<'a>> GrantR<'a, STO> { /// Release a sequence of bytes from the buffer, allowing the space /// to be used by later writes. This consumes the grant. /// @@ -880,10 +969,10 @@ impl<'a, const N: usize> GrantR<'a, { N }> { /// ``` /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -916,29 +1005,14 @@ impl<'a, const N: usize> GrantR<'a, { N }> { self.buf } - /// Sometimes, it's not possible for the lifetimes to check out. For example, - /// if you need to hand this buffer to a function that expects to receive a - /// `&'static [u8]`, it is not possible for the inner reference to outlive the - /// grant itself. - /// - /// You MUST guarantee that in no cases, the reference that is returned here outlives - /// the grant itself. Once the grant has been released, referencing the data contained - /// WILL cause undefined behavior. - /// - /// Additionally, you must ensure that a separate reference to this data is not created - /// to this data, e.g. using `Deref` or the `buf()` method of this grant. - pub unsafe fn as_static_buf(&self) -> &'static [u8] { - transmute::<&[u8], &'static [u8]>(self.buf) - } - #[inline(always)] pub(crate) fn release_inner(&mut self, used: usize) { - let inner = unsafe { &self.bbq.as_ref() }; + let hdr = self.bbq.get_header(); // If there is no grant in progress, return early. This // generally means we are dropping the grant within a // wrapper structure - if !inner.read_in_progress.load(Acquire) { + if !hdr.read_in_progress.load(Acquire) { return; } @@ -946,9 +1020,9 @@ impl<'a, const N: usize> GrantR<'a, { N }> { debug_assert!(used <= self.buf.len()); // This should be fine, purely incrementing - let _ = atomic::fetch_add(&inner.read, used, Release); + let _ = atomic::fetch_add(&hdr.read, used, Release); - inner.read_in_progress.store(false, Release); + hdr.read_in_progress.store(false, Release); } /// Configures the amount of bytes to be released on drop. @@ -957,7 +1031,7 @@ impl<'a, const N: usize> GrantR<'a, { N }> { } } -impl<'a, const N: usize> SplitGrantR<'a, { N }> { +impl<'a, STO: BBGetter<'a>> SplitGrantR<'a, STO> { /// Release a sequence of bytes from the buffer, allowing the space /// to be used by later writes. This consumes the grant. 
/// @@ -979,10 +1053,10 @@ impl<'a, const N: usize> SplitGrantR<'a, { N }> { /// ``` /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -1017,12 +1091,12 @@ impl<'a, const N: usize> SplitGrantR<'a, { N }> { #[inline(always)] pub(crate) fn release_inner(&mut self, used: usize) { - let inner = unsafe { &self.bbq.as_ref() }; + let hdr = self.bbq.get_header(); // If there is no grant in progress, return early. This // generally means we are dropping the grant within a // wrapper structure - if !inner.read_in_progress.load(Acquire) { + if !hdr.read_in_progress.load(Acquire) { return; } @@ -1031,13 +1105,13 @@ impl<'a, const N: usize> SplitGrantR<'a, { N }> { if used <= self.buf1.len() { // This should be fine, purely incrementing - let _ = atomic::fetch_add(&inner.read, used, Release); + let _ = atomic::fetch_add(&hdr.read, used, Release); } else { // Also release parts of the second buffer - inner.read.store(used - self.buf1.len(), Release); + hdr.read.store(used - self.buf1.len(), Release); } - inner.read_in_progress.store(false, Release); + hdr.read_in_progress.store(false, Release); } /// Configures the amount of bytes to be released on drop. @@ -1051,19 +1125,28 @@ impl<'a, const N: usize> SplitGrantR<'a, { N }> { } } -impl<'a, const N: usize> Drop for GrantW<'a, N> { +impl<'a, STO> Drop for GrantW<'a, STO> +where + STO: BBGetter<'a>, +{ fn drop(&mut self) { self.commit_inner(self.to_commit) } } -impl<'a, const N: usize> Drop for GrantR<'a, N> { +impl<'a, STO> Drop for GrantR<'a, STO> +where + STO: BBGetter<'a>, +{ fn drop(&mut self) { self.release_inner(self.to_release) } } -impl<'a, const N: usize> Deref for GrantW<'a, N> { +impl<'a, STO> Deref for GrantW<'a, STO> +where + STO: BBGetter<'a>, +{ type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -1071,13 +1154,19 @@ impl<'a, const N: usize> Deref for GrantW<'a, N> { } } -impl<'a, const N: usize> DerefMut for GrantW<'a, N> { +impl<'a, STO> DerefMut for GrantW<'a, STO> +where + STO: BBGetter<'a>, +{ fn deref_mut(&mut self) -> &mut [u8] { self.buf } } -impl<'a, const N: usize> Deref for GrantR<'a, N> { +impl<'a, STO> Deref for GrantR<'a, STO> +where + STO: BBGetter<'a>, +{ type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -1085,7 +1174,10 @@ impl<'a, const N: usize> Deref for GrantR<'a, N> { } } -impl<'a, const N: usize> DerefMut for GrantR<'a, N> { +impl<'a, STO> DerefMut for GrantR<'a, STO> +where + STO: BBGetter<'a>, +{ fn deref_mut(&mut self) -> &mut [u8] { self.buf } diff --git a/core/src/framed.rs b/core/src/framed.rs index 19f805a..9417a2c 100644 --- a/core/src/framed.rs +++ b/core/src/framed.rs @@ -11,9 +11,9 @@ //! ```rust //! # // bbqueue test shim! //! # fn bbqtest() { -//! use bbqueue_ng::BBBuffer; +//! use bbqueue_ng::BBQueue; //! -//! let bb: BBBuffer<1000> = BBBuffer::new(); +//! let bb: BBQueue<1000> = BBQueue::new(); //! let (mut prod, mut cons) = bb.try_split_framed().unwrap(); //! //! 
// One frame in, one frame out @@ -74,6 +74,7 @@ use crate::{Consumer, GrantR, GrantW, Producer}; use crate::{ vusize::{decode_usize, decoded_len, encode_usize_to_slice, encoded_len}, + bbbuffer::sealed::BBGetter, Result, }; @@ -83,16 +84,22 @@ use core::{ }; /// A producer of Framed data -pub struct FrameProducer<'a, const N: usize> { - pub(crate) producer: Producer<'a, N>, +pub struct FrameProducer<'a, STO> +where + STO: BBGetter<'a>, +{ + pub(crate) producer: Producer<'a, STO>, } -impl<'a, const N: usize> FrameProducer<'a, { N }> { +impl<'a, STO> FrameProducer<'a, STO> +where + STO: BBGetter<'a>, +{ /// Receive a grant for a frame with a maximum size of `max_sz` in bytes. /// /// This size does not include the size of the frame header. The exact size /// of the frame can be set on `commit`. - pub fn grant(&mut self, max_sz: usize) -> Result> { + pub fn grant(&mut self, max_sz: usize) -> Result> { let hdr_len = encoded_len(max_sz); Ok(FrameGrantW { grant_w: self.producer.grant_exact(max_sz + hdr_len)?, @@ -102,13 +109,16 @@ impl<'a, const N: usize> FrameProducer<'a, { N }> { } /// A consumer of Framed data -pub struct FrameConsumer<'a, const N: usize> { - pub(crate) consumer: Consumer<'a, N>, +pub struct FrameConsumer<'a, STO> +where + STO: BBGetter<'a>, +{ + pub(crate) consumer: Consumer<'a, STO>, } -impl<'a, const N: usize> FrameConsumer<'a, { N }> { +impl<'a, STO: BBGetter<'a>> FrameConsumer<'a, STO> { /// Obtain the next available frame, if any - pub fn read(&mut self) -> Option> { + pub fn read(&mut self) -> Option> { // Get all available bytes. We never wrap a frame around, // so if a header is available, the whole frame will be. let mut grant_r = self.consumer.read().ok()?; @@ -140,8 +150,11 @@ impl<'a, const N: usize> FrameConsumer<'a, { N }> { /// the contents without first calling `to_commit()`, then no /// frame will be comitted for writing. #[derive(Debug, PartialEq)] -pub struct FrameGrantW<'a, const N: usize> { - grant_w: GrantW<'a, N>, +pub struct FrameGrantW<'a, STO> +where + STO: BBGetter<'a>, +{ + grant_w: GrantW<'a, STO>, hdr_len: u8, } @@ -150,12 +163,15 @@ pub struct FrameGrantW<'a, const N: usize> { /// NOTE: If the grant is dropped without explicitly releasing /// the contents, then no frame will be released. #[derive(Debug, PartialEq)] -pub struct FrameGrantR<'a, const N: usize> { - grant_r: GrantR<'a, N>, +pub struct FrameGrantR<'a, STO> +where + STO: BBGetter<'a>, +{ + grant_r: GrantR<'a, STO>, hdr_len: u8, } -impl<'a, const N: usize> Deref for FrameGrantW<'a, { N }> { +impl<'a, STO: BBGetter<'a>> Deref for FrameGrantW<'a, STO> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -163,13 +179,13 @@ impl<'a, const N: usize> Deref for FrameGrantW<'a, { N }> { } } -impl<'a, const N: usize> DerefMut for FrameGrantW<'a, { N }> { +impl<'a, STO: BBGetter<'a>> DerefMut for FrameGrantW<'a, STO> { fn deref_mut(&mut self) -> &mut [u8] { &mut self.grant_w.buf[self.hdr_len.into()..] } } -impl<'a, const N: usize> Deref for FrameGrantR<'a, { N }> { +impl<'a, STO: BBGetter<'a>> Deref for FrameGrantR<'a, STO> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -177,13 +193,13 @@ impl<'a, const N: usize> Deref for FrameGrantR<'a, { N }> { } } -impl<'a, const N: usize> DerefMut for FrameGrantR<'a, { N }> { +impl<'a, STO: BBGetter<'a>> DerefMut for FrameGrantR<'a, STO> { fn deref_mut(&mut self) -> &mut [u8] { &mut self.grant_r.buf[self.hdr_len.into()..] 
} } -impl<'a, const N: usize> FrameGrantW<'a, { N }> { +impl<'a, STO: BBGetter<'a>> FrameGrantW<'a, STO> { /// Commit a frame to make it available to the Consumer half. /// /// `used` is the size of the payload, in bytes, not @@ -220,7 +236,7 @@ impl<'a, const N: usize> FrameGrantW<'a, { N }> { } } -impl<'a, const N: usize> FrameGrantR<'a, { N }> { +impl<'a, STO: BBGetter<'a>> FrameGrantR<'a, STO> { /// Release a frame to make the space available for future writing /// /// Note: The full frame is always released diff --git a/core/src/lib.rs b/core/src/lib.rs index b6b1250..12476f9 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -26,10 +26,10 @@ //! ## Local usage //! //! ```rust, no_run -//! # use bbqueue_ng::BBBuffer; +//! # use bbqueue_ng::BBQueue; //! # //! // Create a buffer with six elements -//! let bb: BBBuffer<6> = BBBuffer::new(); +//! let bb: BBQueue<6> = BBQueue::new(); //! let (mut prod, mut cons) = bb.try_split().unwrap(); //! //! // Request space for one byte @@ -55,10 +55,10 @@ //! ## Static usage //! //! ```rust, no_run -//! # use bbqueue_ng::BBBuffer; +//! # use bbqueue_ng::BBQueue; //! # //! // Create a buffer with six elements -//! static BB: BBBuffer<6> = BBBuffer::new(); +//! static BB: BBQueue<6> = BBQueue::new(); //! //! fn main() { //! // Split the bbqueue into producer and consumer halves. @@ -102,8 +102,10 @@ //! most, so they should make no difference to most applications. #![cfg_attr(not(feature = "std"), no_std)] -#![deny(missing_docs)] -#![deny(warnings)] +// AJM: TODO - Restore +// #![deny(missing_docs)] +// #![deny(warnings)] +#![allow(dead_code, unused_imports)] mod bbbuffer; pub use bbbuffer::*; diff --git a/core/storage-notes.md b/core/storage-notes.md new file mode 100644 index 0000000..75d3ebd --- /dev/null +++ b/core/storage-notes.md @@ -0,0 +1,121 @@ +# BBQueue Storage Work + +## Use cases + +Current: "Embedded Use Case" + +* Statically allocate storage +* BBBuffer lives forever +* User uses Producer and Consumer + +Future: + +* Have the STORAGE for BBBuffer be provided separately +* Allow for uses like: + * Statically allocated storage (like now) + * Heap Allocation provided storage (Arc, etc.) + * User provided storage (probably `unsafe`) + +## Sample Code for Use Cases + +### Static Buffer + +```rust + +static BB_QUEUE: BBQueue> = BBQueue::new(OwnedBBBuffer::new()); + +fn main() { + let (prod, cons) = BB_QUEUE.try_split().unwrap(); + // ... +} +``` + +### Heap Allocation provided storage + +Choice A: Simple + +```rust +fn main() { + // BBQueue> + // Producer>, Consumer> + // + // Storage is dropped when `prod` and `cons` are BOTH dropped. + let (prod, cons) = BBQueue::new_arc::<1024>(); +} +``` + +Choice B: Explicit + +```rust +fn main() { + // EDIT: This is sub-par, because this would require `arc_queue`, + // `prod`, and `storage` to ALL be dropped + // before the buffer is dropped. + let arc_queue: BBQueue> = BBStorage::new_arc(); + let (prod, cons) = arc_queue.try_split().unwrap(); +} +``` + +NOTE: This is not yet possible as of the current state of the repo. I do intend to support it. + +### User provided storage + +Choice A: Naive + +EDIT: Not this. See below + +```rust +static mut UNSAFE_BUFFER: [u8; 1024] = [0u8; 1024]; + +fn main() { + let borrowed = unsafe { + // TODO: Make sure BBQueue has lifetime shorter + // than `'borrowed` here? In this case it is + // 'static, but may not always be.
+ BBStorageBorrowed::new(&mut UNSAFE_BUFFER) + }; + let bbqueue = BBQueue::new(borrowed); + + // NOTE: This is NOT good, because the bound lifetime + // of prod and cons will be that of `bbqueue`, which + // is probably not suitable (non-'static). In many cases, we want + // the producer and consumer to also have `Producer<'static>` lifetime + let (prod, cons) = bbqueue.try_split().unwrap(); +} +``` + +Choice B: "loadable" storage? + +This would require EITHER: + +* The BBStorage methods are fallible +* The split belongs to the BBStorage item + * (Could be an inherent or trait method) +* Loadable storage panics on a split if not loaded + +```rust +static mut UNSAFE_BUFFER: [u8; 1024] = [0u8; 1024]; +static LOADABLE_BORROWED: BBStorageLoadBorrow = BBStorageLoadBorrow::new(); + +fn main() { + // This could probably be shortened to a single "store and take header" action. + // Done in multiple steps here for clarity. + let mut_buf = unsafe { + &mut UNSAFE_BUFFER + }; + let old = LOADABLE_BORROWED.store(mut_buf); // -> Result> + // Result: Err if already taken + // Option: Some if other buffer already stored + assert_eq!(Ok(None), old); + + let bbqueue = BBQueue::new(LOADABLE_BORROWED.take_header().unwrap()); + + // Here prod and cons are <'static>, because LOADABLE_BORROWED is static. + // BUUUUUT we still probably allow access of BBStorage methods, which would be totally unsafe + // + // EDIT: Okay, sealing the trait DOES prevent outer usage, so we're good in this regard! + let (prod, cons) = bbqueue.try_split().unwrap(); +} +``` + +NOTE: This is not yet possible as of the current state of the repo. I do intend to support it. diff --git a/rust-toolchain b/rust-toolchain index c089857..65b2df8 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2020-12-28 +beta
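
For orientation, here is a minimal sketch of the storage-getter abstraction that the grant, producer, and consumer types in the diff above are being rewritten around. It is illustrative only: the `BBHeader` name, its exact field set, and the `(base pointer, capacity)` shape of `get_storage()` are assumptions inferred from the `hdr.*` atomics and the `sto.1` capacity read in `commit_inner()`, not the crate's actual definitions.

```rust
// Hedged sketch only -- not the crate's real code. Field names follow the
// atomics touched by commit_inner()/release_inner() above; the get_storage()
// return shape is assumed from the `sto.1` capacity read.
use core::sync::atomic::{AtomicBool, AtomicUsize};

/// Shared control block, independent of where the backing bytes live.
pub struct BBHeader {
    pub write: AtomicUsize,            // next byte to be written
    pub read: AtomicUsize,             // next byte to be read
    pub last: AtomicUsize,             // artificial end of the ring after a wrap
    pub reserve: AtomicUsize,          // end of the currently reserved region
    pub write_in_progress: AtomicBool, // a write grant is outstanding
    pub read_in_progress: AtomicBool,  // a read grant is outstanding
}

/// Storage getter: everything the producer, consumer, and grant types need.
/// Sealing this trait (as the storage notes conclude) keeps the raw storage
/// out of reach of downstream code.
pub trait BBGetter<'a> {
    /// Access the shared control block.
    fn get_header(&self) -> &BBHeader;
    /// Access the backing bytes as (base pointer, capacity in bytes).
    fn get_storage(&self) -> (*mut u8, usize);
}
```

With such a trait sealed, `Producer`, `Consumer`, and the grant types can stay generic over `STO: BBGetter<'a>` (static, heap, or borrowed storage) while user code only ever sees the halves returned by `try_split()`.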