diff --git a/src/buf/byte.rs b/src/buf/byte.rs
index 1e187dd92572e8a917147957f9b1cc0bd2eefb2d..106f574351d67e91e6d38283506cef029f576fef 100644
--- a/src/buf/byte.rs
+++ b/src/buf/byte.rs
@@ -9,6 +9,9 @@ use std::{cmp, ptr};
  */
 
 /// A `Buf` backed by a contiguous region of memory.
+///
+/// This `Buf` is better suited for cases where there is a clear delineation
+/// between reading and writing.
 pub struct ByteBuf {
     mem: alloc::MemRef,
     cap: u32,
diff --git a/src/buf/mod.rs b/src/buf/mod.rs
index be02ee1bc0f2c45f9e225d44aff2c41d7d2aa9e4..07de5f8b772237fb882e1fcd9662152171df6510 100644
--- a/src/buf/mod.rs
+++ b/src/buf/mod.rs
@@ -9,7 +9,7 @@ pub use self::ring::RingBuf;
 pub use self::slice::{SliceBuf, MutSliceBuf};
 
 use {BufError, RopeBuf};
-use std::{cmp, fmt, io, ptr};
+use std::{cmp, fmt, io, ptr, usize};
 
 /// A trait for values that provide sequential read access to bytes.
 pub trait Buf {
@@ -312,6 +312,77 @@ impl fmt::Debug for Box<Buf+'static> {
     }
 }
 
+impl Buf for io::Cursor<Vec<u8>> {
+    fn remaining(&self) -> usize {
+        self.get_ref().len() - self.position() as usize
+    }
+
+    fn bytes(&self) -> &[u8] {
+        let pos = self.position() as usize;
+        &(&self.get_ref())[pos..]
+    }
+
+    fn advance(&mut self, cnt: usize) {
+        let pos = self.position() as usize;
+        let pos = cmp::min(self.get_ref().len(), pos + cnt);
+        self.set_position(pos as u64);
+    }
+}
+
+impl MutBuf for Vec<u8> {
+    fn remaining(&self) -> usize {
+        usize::MAX - self.len()
+    }
+
+    fn advance(&mut self, cnt: usize) {
+        let len = self.len() + cnt;
+
+        if len > self.capacity() {
+            // Reserve additional
+            // TODO: Should this case panic?
+            // `reserve` takes the count of extra bytes beyond `self.len()`.
+            self.reserve(cnt);
+        }
+
+        unsafe {
+            self.set_len(len);
+        }
+    }
+
+    fn mut_bytes(&mut self) -> &mut [u8] {
+        use std::slice;
+
+        if self.capacity() == self.len() {
+            self.reserve(64); // Grow the vec
+        }
+
+        let cap = self.capacity();
+        let len = self.len();
+
+        unsafe {
+            let ptr = self.as_mut_ptr();
+            &mut slice::from_raw_parts_mut(ptr, cap)[len..]
+        }
+    }
+}
+
+impl<'a> Buf for io::Cursor<&'a [u8]> {
+    fn remaining(&self) -> usize {
+        self.get_ref().len() - self.position() as usize
+    }
+
+    fn bytes(&self) -> &[u8] {
+        let pos = self.position() as usize;
+        &(&self.get_ref())[pos..]
+    }
+
+    fn advance(&mut self, cnt: usize) {
+        let pos = self.position() as usize;
+        let pos = cmp::min(self.get_ref().len(), pos + cnt);
+        self.set_position(pos as u64);
+    }
+}
+
 /*
  *
  * ===== Read impls =====
diff --git a/src/buf/ring.rs b/src/buf/ring.rs
index 57ec32dabebc14b84cb5ca0410a5af48e042dcee..a422093f4a25231e0a87ad37e56242e9a689c2ee 100644
--- a/src/buf/ring.rs
+++ b/src/buf/ring.rs
@@ -9,6 +9,8 @@ enum Mark {
 /// Buf backed by a continous chunk of memory. Maintains a read cursor and a
 /// write cursor. When reads and writes reach the end of the allocated buffer,
 /// wraps around to the start.
+///
+/// This type is suited for use cases where reads and writes are intermixed.
 pub struct RingBuf {
     ptr: alloc::MemRef, // Pointer to the memory
     cap: usize, // Capacity of the buffer
@@ -19,6 +21,7 @@ pub struct RingBuf {
 
 // TODO: There are most likely many optimizations that can be made
 impl RingBuf {
+    /// Allocates a new `RingBuf` with the specified capacity.
     pub fn new(mut capacity: usize) -> RingBuf {
         // Handle the 0 length buffer case
         if capacity == 0 {
@@ -45,14 +48,17 @@ impl RingBuf {
         }
     }
 
+    /// Returns `true` if the buf cannot accept any further writes.
     pub fn is_full(&self) -> bool {
         self.cap == self.len
     }
 
+    /// Returns `true` if the buf cannot accept any further reads.
     pub fn is_empty(&self) -> bool {
         self.len == 0
     }
 
+    /// Returns the number of bytes that the buf can hold.
     pub fn capacity(&self) -> usize {
         self.cap
     }
@@ -85,10 +91,12 @@ impl RingBuf {
         }
     }
 
+    /// Returns the number of bytes remaining to read.
    fn read_remaining(&self) -> usize {
        self.len
    }
 
+    /// Returns the remaining write capacity until which the buf becomes full.
    fn write_remaining(&self) -> usize {
        self.cap - self.len
    }
diff --git a/test/test_buf.rs b/test/test_buf.rs
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..93689ac020b608a11b60d01ad36436fa7860deda 100644
--- a/test/test_buf.rs
+++ b/test/test_buf.rs
@@ -0,0 +1,48 @@
+use bytes::{Buf, MutBuf, MutBufExt};
+use std::usize;
+use std::io::{Cursor};
+
+#[test]
+pub fn test_fresh_cursor_vec() {
+    let mut buf = Cursor::new(b"hello".to_vec());
+
+    assert_eq!(buf.remaining(), 5);
+    assert_eq!(buf.bytes(), b"hello");
+
+    buf.advance(2);
+
+    assert_eq!(buf.remaining(), 3);
+    assert_eq!(buf.bytes(), b"llo");
+
+    buf.advance(3);
+
+    assert_eq!(buf.remaining(), 0);
+    assert_eq!(buf.bytes(), b"");
+
+    buf.advance(1);
+
+    assert_eq!(buf.remaining(), 0);
+    assert_eq!(buf.bytes(), b"");
+}
+
+#[test]
+pub fn test_vec_as_mut_buf() {
+    let mut buf = vec![];
+
+    assert_eq!(buf.remaining(), usize::MAX);
+    assert_eq!(buf.mut_bytes().len(), 64);
+
+    buf.write(&b"zomg"[..]).unwrap();
+
+    assert_eq!(&buf, b"zomg");
+
+    assert_eq!(buf.remaining(), usize::MAX - 4);
+    assert_eq!(buf.capacity(), 64);
+
+    for _ in 0..16 {
+        buf.write(&b"zomg"[..]).unwrap();
+    }
+
+    assert_eq!(buf.len(), 68);
+    assert_eq!(buf.capacity(), 128);
+}