From 57e84f267b10f310eb752e733f524659594cf0fc Mon Sep 17 00:00:00 2001
From: Carl Lerche <me@carllerche.com>
Date: Tue, 1 Nov 2016 08:14:31 -0700
Subject: [PATCH] Restructure and trim down the library

This commit is a significant overhaul of the library in an effort to
head towards a stable API. The rope implementation as well as a number
of buffer implementations have been removed from the library and will
live at https://github.com/carllerche/bytes-more while they incubate.

**Bytes / BytesMut**

`Bytes` is now an atomically reference counted byte slice. As it is
contiguous, it offers a richer API than before. `BytesMut` is a mutable
variant. It is safe to mutate because it is guaranteed to be the only
handle to a given byte slice.

**AppendBuf -> ByteBuf**

`AppendBuf` has been replaced by `ByteBuf`. The API is not identical,
but is close enough to be considered a suitable replacement.

**Removed types**

The following types have been removed in favor of living in bytes-more:

* RingBuf
* BlockBuf
* `Bytes` as a rope implementation
* ReadExt
* WriteExt
---
 .travis.yml | 7 -
 Cargo.toml | 14 -
 bench/bench.rs | 49 ---
 deploy.sh | 18 -
 examples/into_buf.rs | 17 -
 src/alloc/heap.rs | 9 -
 src/alloc/mod.rs | 66 ----
 src/alloc/pool.rs | 265 -------------
 src/buf/byte.rs | 204 ++++++++++
 src/{imp => }/buf/mod.rs | 404 ++++++++++----------
 src/{imp => }/buf/slice.rs | 67 ++--
 src/buf/take.rs | 182 +++++++++
 src/bytes.rs | 522 ++++++++++++++++++++++++++
 src/imp/alloc.rs | 68 ----
 src/imp/buf/append.rs | 122 ------
 src/imp/buf/block.rs | 367 ------------------
 src/imp/buf/bound.rs | 55 ---
 src/imp/buf/ring.rs | 158 --------
 src/imp/buf/take.rs | 69 ----
 src/imp/bytes/mod.rs | 289 --------------
 src/imp/bytes/rope.rs | 642 --------------------------------
 src/imp/bytes/seq.rs | 78 ----
 src/imp/bytes/small.rs | 81 ----
 src/imp/mod.rs | 5 -
 src/lib.rs | 50 +--
 test/test.rs | 27 --
 test/test_append.rs | 30 --
 test/test_block.rs | 22 --
 test/test_buf_fill.rs | 48 ---
 test/test_bytes.rs | 42 ---
 test/test_pool.rs | 85 -----
 test/test_ring.rs | 129 -------
 test/test_rope.rs | 83 -----
 test/test_seq.rs | 32 --
 test/test_slice_buf.rs | 67 ----
 test/test_small.rs | 32 --
 {test => tests}/test_buf.rs | 27 +-
 tests/test_bytes.rs | 174 +++++++++
 {test => tests}/test_mut_buf.rs | 24 +-
 tests/test_slice_buf.rs | 71 ++++
 40 files changed, 1419 insertions(+), 3282 deletions(-)
 delete mode 100644 bench/bench.rs
 delete mode 100644 deploy.sh
 delete mode 100644 examples/into_buf.rs
 delete mode 100644 src/alloc/heap.rs
 delete mode 100644 src/alloc/mod.rs
 delete mode 100644 src/alloc/pool.rs
 create mode 100644 src/buf/byte.rs
 rename src/{imp => }/buf/mod.rs (57%)
 rename src/{imp => }/buf/slice.rs (67%)
 create mode 100644 src/buf/take.rs
 create mode 100644 src/bytes.rs
 delete mode 100644 src/imp/alloc.rs
 delete mode 100644 src/imp/buf/append.rs
 delete mode 100644 src/imp/buf/block.rs
 delete mode 100644 src/imp/buf/bound.rs
 delete mode 100644 src/imp/buf/ring.rs
 delete mode 100644 src/imp/buf/take.rs
 delete mode 100644 src/imp/bytes/mod.rs
 delete mode 100644 src/imp/bytes/rope.rs
 delete mode 100644 src/imp/bytes/seq.rs
 delete mode 100644 src/imp/bytes/small.rs
 delete mode 100644 src/imp/mod.rs
 delete mode 100644 test/test.rs
 delete mode 100644 test/test_append.rs
 delete mode 100644 test/test_block.rs
 delete mode 100644 test/test_buf_fill.rs
 delete mode 100644 test/test_bytes.rs
 delete mode 100644 test/test_pool.rs
 delete mode 100644 test/test_ring.rs
 delete mode 100644 test/test_rope.rs
 delete mode 100644 test/test_seq.rs
 delete mode 100644
test/test_slice_buf.rs delete mode 100644 test/test_small.rs rename {test => tests}/test_buf.rs (68%) create mode 100644 tests/test_bytes.rs rename {test => tests}/test_mut_buf.rs (57%) create mode 100644 tests/test_slice_buf.rs diff --git a/.travis.yml b/.travis.yml index 85e4c58..6094ac4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,10 +9,3 @@ rust: script: - cargo test - cargo doc --no-deps - -after_success: - - test $TRAVIS_PULL_REQUEST == "false" && test $TRAVIS_BRANCH == "master" && bash deploy.sh - -env: - global: - secure: "mBLJANLvtmyWCXw4zMquptqHQnws0pF+C/u4zL1Jfwz8T4UnUjmBUMxSOgSEIzrOM3qb+CTCjY2/j6BM21+/Zfdl8k8CvFWtkqQUPwIfrtwddCgI+P8Hlrk8G43drz/8XAbZ7dOl+Ovwhr0xnSD9ImfyXJec1kDWhubmgyt47Fs=" diff --git a/Cargo.toml b/Cargo.toml index b88cc19..6366eb9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,18 +19,4 @@ exclude = [ ] [dependencies] -log = "0.3.6" byteorder = "0.5.3" - -[dev-dependencies] -rand = "0.3.5" - -[[bench]] - -name = "bench" -path = "bench/bench.rs" - -[[test]] - -name = "test" -path = "test/test.rs" diff --git a/bench/bench.rs b/bench/bench.rs deleted file mode 100644 index d668fab..0000000 --- a/bench/bench.rs +++ /dev/null @@ -1,49 +0,0 @@ -#![feature(test)] - -use bytes::ByteBuf; -use bytes::alloc::Pool; -use test::Bencher; -use std::sync::Arc; - -extern crate bytes; -extern crate test; - -const SIZE:usize = 4_096; - -#[bench] -pub fn bench_allocate_arc_vec(b: &mut Bencher) { - b.iter(|| { - let mut v = Vec::with_capacity(200); - - for _ in 0..200 { - let buf = Arc::new(Vec::<u8>::with_capacity(SIZE)); - v.push(buf); - } - }); -} - -#[bench] -pub fn bench_allocate_byte_buf(b: &mut Bencher) { - b.iter(|| { - let mut v = Vec::with_capacity(200); - - for _ in 0..200 { - let buf = ByteBuf::mut_with_capacity(SIZE); - v.push(buf); - } - }); -} - -#[bench] -pub fn bench_allocate_with_pool(b: &mut Bencher) { - let mut pool = Pool::with_capacity(1_024, SIZE); - - b.iter(|| { - let mut v = Vec::with_capacity(200); - - for _ in 0..200 { - let buf = pool.new_byte_buf(); - v.push(buf); - } - }) -} diff --git a/deploy.sh b/deploy.sh deleted file mode 100644 index ad266a0..0000000 --- a/deploy.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -rev=$(git rev-parse --short HEAD) - -cd target/doc - -git init -git config user.name "Carl Lerche" -git config user.email "me@carllerche.com" - -git remote add upstream "https://$GH_TOKEN@github.com/carllerche/bytes" -git fetch upstream && git reset upstream/gh-pages - -touch . - -git add -A . 
-git commit -m "rebuild pages at ${rev}" -git push -q upstream HEAD:gh-pages diff --git a/examples/into_buf.rs b/examples/into_buf.rs deleted file mode 100644 index 753a5ad..0000000 --- a/examples/into_buf.rs +++ /dev/null @@ -1,17 +0,0 @@ -extern crate bytes; - -use bytes::{Buf, IntoBuf, Bytes}; - -pub fn dump<T>(data: &T) where - for<'a> &'a T: IntoBuf, -{ - let mut dst: Vec<u8> = vec![]; - data.into_buf().copy_to(&mut dst); - println!("GOT: {:?}", dst); -} - -pub fn main() { - let b = Bytes::from_slice(b"hello world"); - dump(&b); - dump(&b); -} diff --git a/src/alloc/heap.rs b/src/alloc/heap.rs deleted file mode 100644 index 2a8a73e..0000000 --- a/src/alloc/heap.rs +++ /dev/null @@ -1,9 +0,0 @@ -use alloc::{MemRef}; -use std::sync::Arc; - -pub unsafe fn allocate(len: usize) -> MemRef { - let mut v = Vec::with_capacity(len); - v.set_len(len); - - MemRef::new(Arc::new(v.into_boxed_slice())) -} diff --git a/src/alloc/mod.rs b/src/alloc/mod.rs deleted file mode 100644 index 5ab497c..0000000 --- a/src/alloc/mod.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! Buffer allocation -//! -//! This module is currently not really in use - -mod heap; - -use std::sync::Arc; - -pub struct MemRef { - mem: Arc<Box<[u8]>>, -} - -/// Allocate a segment of memory and return a `MemRef`. -pub unsafe fn heap(len: usize) -> MemRef { - heap::allocate(len) -} - -impl MemRef { - #[inline] - pub unsafe fn new(mem: Arc<Box<[u8]>>) -> MemRef { - MemRef { mem: mem } - } - - #[inline] - pub fn len(&self) -> usize { - self.mem.len() - } - - #[inline] - pub unsafe fn bytes(&self) -> &[u8] { - &*self.mem - } - - #[inline] - pub unsafe fn bytes_slice(&self, start: usize, end: usize) -> &[u8] { - use std::slice; - let ptr = self.mem.as_ptr().offset(start as isize); - slice::from_raw_parts(ptr, end - start) - } - - #[inline] - pub unsafe fn mut_bytes(&mut self) -> &mut [u8] { - use std::slice; - let len = self.mem.len(); - slice::from_raw_parts_mut(self.mem.as_ptr() as *mut u8, len) - } - - /// Unsafe, unchecked access to the bytes - #[inline] - pub unsafe fn mut_bytes_slice(&mut self, start: usize, end: usize) -> &mut [u8] { - use std::slice; - let ptr = self.mem.as_ptr().offset(start as isize); - slice::from_raw_parts_mut(ptr as *mut u8, end - start) - } - - pub fn get_ref(&self) -> &Arc<Box<[u8]>> { - &self.mem - } -} - -impl Clone for MemRef { - #[inline] - fn clone(&self) -> MemRef { - MemRef { mem: self.mem.clone() } - } -} diff --git a/src/alloc/pool.rs b/src/alloc/pool.rs deleted file mode 100644 index cafacb8..0000000 --- a/src/alloc/pool.rs +++ /dev/null @@ -1,265 +0,0 @@ -use {AppendBuf, ByteBuf, MutByteBuf}; -use super::{Mem, MemRef}; -use stable_heap as heap; -use std::{mem, ptr, isize, usize}; -use std::cell::{Cell, UnsafeCell}; -use std::marker::PhantomData; -use std::sync::Arc; -use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; - -// TODO: ensure that not Sync -pub struct Pool { - inner: Arc<PoolInner>, - marker: PhantomData<Cell<()>>, -} - -struct PoolInner { - ptr: *mut u8, // Pointer to the raw memory - next: AtomicPtr<Entry>, - cap: usize, // Total number of entries - buf_len: usize, // Byte size of each byte buf - entry_len: usize, // Byte size of each entry -} - -struct Entry { - inner: UnsafeCell<Inner>, -} - -struct Inner { - pool: Option<Pool>, - refs: AtomicUsize, - next: *mut Entry, -} - -const MAX_REFCOUNT: usize = (isize::MAX) as usize; - -impl Pool { - /// Constructs a new `Pool` with with specified capacity such that each - /// buffer in the pool has a length of `buf_len`. 
- pub fn with_capacity(cap: usize, mut buf_len: usize) -> Pool { - // Ensure that all buffers have a power of 2 size. This enables - // optimizations in Buf implementations. - buf_len = buf_len.next_power_of_two(); - - let inner = Arc::new(PoolInner::with_capacity(cap, buf_len)); - - // Iterate each entry and initialize the memory - let mut next = ptr::null_mut(); - - for i in 0..cap { - unsafe { - let off = i * inner.entry_len; - let ptr = inner.ptr.offset(off as isize); - let e = &mut *(ptr as *mut Entry); - - ptr::write(&mut e.inner as *mut UnsafeCell<Inner>, UnsafeCell::new(Inner { - pool: None, - refs: AtomicUsize::new(0), - next: next, - })); - - next = ptr as *mut Entry; - - let ptr = ptr.offset(mem::size_of::<Entry>() as isize); - ptr::write(ptr as *mut &Mem, e as &Mem); - - let ptr = ptr.offset(mem::size_of::<&Mem>() as isize); - ptr::write(ptr as *mut usize, buf_len); - } - } - - // Set the next ptr to the head of the Entry linked list - inner.next.store(next, Ordering::Relaxed); - - Pool { - inner: inner, - marker: PhantomData, - } - } - - /// Returns the number of buffers that the `Pool` holds. - #[inline] - pub fn capacity(&self) -> usize { - self.inner.cap - } - - /// Returns the size of buffers allocated by the pool - #[inline] - pub fn buffer_len(&self) -> usize { - self.inner.buf_len - } - - /// Returns a new `ByteBuf` backed by a buffer from the pool. If the pool - /// is depleted, `None` is returned. - pub fn new_byte_buf(&self) -> Option<MutByteBuf> { - let len = self.inner.buf_len as u32; - self.checkout().map(|mem| { - let buf = unsafe { ByteBuf::from_mem_ref(mem, len, 0, len) }; - buf.flip() - }) - } - - pub fn new_append_buf(&self) -> Option<AppendBuf> { - let len = self.inner.buf_len as u32; - self.checkout().map(|mem| unsafe { AppendBuf::from_mem_ref(mem, len, 0) }) - } - - fn checkout(&self) -> Option<MemRef> { - unsafe { - let mut ptr = self.inner.next.load(Ordering::Acquire); - - loop { - if ptr.is_null() { - // The pool is depleted - return None; - } - - let inner = &*(*ptr).inner.get(); - - let next = inner.next; - - let res = self.inner.next.compare_and_swap(ptr, next, Ordering::AcqRel); - - if res == ptr { - break; - } - - ptr = res; - } - - let inner = &mut *(*ptr).inner.get(); - - // Unset next pointer & set the pool - inner.next = ptr::null_mut(); - inner.refs.store(1, Ordering::Relaxed); - inner.pool = Some(self.clone()); - - let ptr = ptr as *mut u8; - let ptr = ptr.offset(mem::size_of::<Entry>() as isize); - - Some(MemRef::new(ptr)) - } - } - - fn clone(&self) -> Pool { - Pool { - inner: self.inner.clone(), - marker: PhantomData, - } - } -} - -impl PoolInner { - fn with_capacity(cap: usize, buf_len: usize) -> PoolInner { - let ptr = unsafe { heap::allocate(alloc_len(cap, buf_len), align()) }; - - PoolInner { - ptr: ptr, - next: AtomicPtr::new(ptr::null_mut()), - cap: cap, - buf_len: buf_len, - entry_len: entry_len(buf_len), - } - } -} - -impl Drop for PoolInner { - fn drop(&mut self) { - unsafe { heap::deallocate(self.ptr, alloc_len(self.cap, self.buf_len), align()) } - } -} - -impl Entry { - fn release(&self) { - unsafe { - let inner = &mut *self.inner.get(); - let pool = inner.pool.take() - .expect("entry not associated with a pool"); - - let mut next = pool.inner.next.load(Ordering::Acquire); - - loop { - inner.next = next; - - let actual = pool.inner.next - .compare_and_swap(next, self as *const Entry as *mut Entry, Ordering::AcqRel); - - if actual == next { - break; - } - - next = actual; - } - } - } -} - -impl Mem for Entry { - fn ref_inc(&self) 
{ - // Using a relaxed ordering is alright here, as knowledge of the - // original reference prevents other threads from erroneously deleting - // the object. - // - // As explained in the [Boost documentation][1], Increasing the - // reference counter can always be done with memory_order_relaxed: New - // references to an object can only be formed from an existing - // reference, and passing an existing reference from one thread to - // another must already provide any required synchronization. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - let old_size = unsafe { - (*self.inner.get()).refs.fetch_add(1, Ordering::Relaxed) - }; - - // However we need to guard against massive refcounts in case someone - // is `mem::forget`ing Arcs. If we don't do this the count can overflow - // and users will use-after free. We racily saturate to `isize::MAX` on - // the assumption that there aren't ~2 billion threads incrementing - // the reference count at once. This branch will never be taken in - // any realistic program. - // - // We abort because such a program is incredibly degenerate, and we - // don't care to support it. - if old_size > MAX_REFCOUNT { - panic!("too many refs"); - } - } - - fn ref_dec(&self) { - unsafe { - let prev = (*self.inner.get()).refs.fetch_sub(1, Ordering::Release); - - if prev != 1 { - return; - } - } - - atomic::fence(Ordering::Acquire); - self.release(); - } -} - -// TODO: is there a better way to do this? -unsafe impl Send for Entry {} -unsafe impl Sync for Entry {} - -fn alloc_len(cap: usize, buf_len: usize) -> usize { - cap * entry_len(buf_len) -} - -fn entry_len(bytes_len: usize) -> usize { - let len = bytes_len + - mem::size_of::<Entry>() + - mem::size_of::<&Mem>() + - mem::size_of::<usize>(); - - if len & (align() - 1) == 0 { - len - } else { - (len & !align()) + align() - } -} - -fn align() -> usize { - mem::size_of::<usize>() -} diff --git a/src/buf/byte.rs b/src/buf/byte.rs new file mode 100644 index 0000000..ed73bbd --- /dev/null +++ b/src/buf/byte.rs @@ -0,0 +1,204 @@ +use {Buf, BufMut, BytesMut}; + +use std::{cmp, fmt}; + +/// A buffer backed by `BytesMut` +pub struct ByteBuf { + mem: BytesMut, + rd: usize, +} + +impl ByteBuf { + /// Create a new `ByteBuf` with 8kb capacity + pub fn new() -> ByteBuf { + ByteBuf::with_capacity(8 * 1024) + } + + /// Create a new `ByteBuf` with `cap` capacity + pub fn with_capacity(cap: usize) -> ByteBuf { + ByteBuf { + mem: BytesMut::with_capacity(cap), + rd: 0, + } + } + + /// Create a new `ByteBuf` backed by `bytes` + pub fn from_bytes(bytes: BytesMut) -> ByteBuf { + ByteBuf { + mem: bytes, + rd: 0, + } + } + + /// Create a new `ByteBuf` containing the given slice + pub fn from_slice<T: AsRef<[u8]>>(bytes: T) -> ByteBuf { + let mut buf = ByteBuf::with_capacity(bytes.as_ref().len()); + buf.copy_from_slice(bytes.as_ref()); + buf + } + + /// Return the number of bytes the buffer can contain + pub fn capacity(&self) -> usize { + self.mem.capacity() + } + + /// Return the read cursor position + pub fn position(&self) -> usize { + self.rd + } + + /// Set the read cursor position + pub fn set_position(&mut self, position: usize) { + assert!(position <= self.mem.len(), "position out of bounds"); + self.rd = position + } + + /// Return the number of buffered bytes + pub fn len(&self) -> usize { + self.mem.len() + } + + /// Returns `true` if the buffer contains no unread bytes + pub fn is_empty(&self) -> bool { + self.mem.is_empty() + } + + /// Clears the buffer, removing any written data + pub fn 
clear(&mut self) { + self.rd = 0; + unsafe { self.mem.set_len(0); } + } + + /// Splits the buffer into two at the current read index. + pub fn drain_read(&mut self) -> BytesMut { + let drained = self.mem.drain_to(self.rd); + self.rd = 0; + drained + } + + /// Splits the buffer into two at the given index. + pub fn drain_to(&mut self, at: usize) -> BytesMut { + let drained = self.mem.drain_to(at); + + if at >= self.rd { + self.rd = 0; + } else { + self.rd -= at; + } + + drained + } + + /// Reserves capacity for at least additional more bytes to be written in + /// the given `ByteBuf`. The `ByteBuf` may reserve more space to avoid + /// frequent reallocations. + pub fn reserve(&mut self, additional: usize) { + if self.remaining_mut() < additional { + let cap = cmp::max(self.capacity() * 2, self.len() + additional); + let cap = cap.next_power_of_two(); + + let mut new = ByteBuf::with_capacity(cap); + + new.copy_from_slice(self.mem.as_ref()); + new.rd = self.rd; + + *self = new; + } + } + + /// Reserves the minimum capacity for exactly additional more bytes to be + /// written in the given `ByteBuf`. Does nothing if the capacity is already + /// sufficient. + /// + /// Note that the allocator may give the collection more space than it + /// requests. Therefore capacity can not be relied upon to be precisely + /// minimal. Prefer reserve if future insertions are expected. + pub fn reserve_exact(&mut self, additional: usize) { + if self.remaining_mut() < additional { + let cap = self.len() + additional; + let mut new = ByteBuf::with_capacity(cap); + + new.copy_from_slice(self.mem.as_ref()); + new.rd = self.rd; + + *self = new; + } + } + + /// Gets a reference to the underlying `BytesMut` + pub fn get_ref(&self) -> &BytesMut { + &self.mem + } + + /// Unwraps the `ByteBuf`, returning the underlying `BytesMut` + pub fn into_inner(self) -> BytesMut { + self.mem + } +} + +impl Buf for ByteBuf { + fn remaining(&self) -> usize { + self.len() - self.rd + } + + fn bytes(&self) -> &[u8] { + &self.mem[self.rd..] + } + + fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.remaining(), "buffer overflow"); + self.rd += cnt; + } + + fn copy_to_slice(&mut self, dst: &mut [u8]) { + assert!(self.remaining() >= dst.len()); + + let len = dst.len(); + dst.copy_from_slice(&self.bytes()[..len]); + self.rd += len; + } +} + +impl BufMut for ByteBuf { + fn remaining_mut(&self) -> usize { + self.capacity() - self.len() + } + + unsafe fn advance_mut(&mut self, cnt: usize) { + let new_len = self.len() + cnt; + self.mem.set_len(new_len); + } + + unsafe fn bytes_mut(&mut self) -> &mut [u8] { + let len = self.len(); + &mut self.mem.as_raw()[len..] 
+ } + + fn copy_from_slice(&mut self, src: &[u8]) { + assert!(self.remaining_mut() >= src.len()); + + let len = src.len(); + + unsafe { + self.bytes_mut()[..len].copy_from_slice(src); + self.advance_mut(len); + } + } +} + +impl fmt::Debug for ByteBuf { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.bytes().fmt(fmt) + } +} + +impl fmt::Write for ByteBuf { + fn write_str(&mut self, s: &str) -> fmt::Result { + BufMut::put_str(self, s); + Ok(()) + } + + fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { + fmt::write(self, args) + } +} diff --git a/src/imp/buf/mod.rs b/src/buf/mod.rs similarity index 57% rename from src/imp/buf/mod.rs rename to src/buf/mod.rs index 04c4ed2..e280bfe 100644 --- a/src/imp/buf/mod.rs +++ b/src/buf/mod.rs @@ -1,14 +1,10 @@ -pub mod append; -pub mod block; -pub mod bound; +pub mod byte; pub mod slice; -pub mod ring; pub mod take; -use {Bytes}; -use buf::Take; +use {Bytes, Take, TakeMut}; use byteorder::ByteOrder; -use std::{cmp, fmt, io, ptr, usize}; +use std::{cmp, io, ptr, usize}; /// A trait for values that provide sequential read access to bytes. pub trait Buf { @@ -28,16 +24,18 @@ pub trait Buf { self.remaining() > 0 } - fn copy_to<S: Sink + ?Sized>(&mut self, dst: &mut S) -> usize - where Self: Sized { - let rem = self.remaining(); + /// Copies bytes from `self` into `dst` + /// + /// # Panics + /// + /// The function panics if `self` does not contain enough bytes to fill + /// `dst`. + fn copy_to<S: Sink + ?Sized>(&mut self, dst: &mut S) where Self: Sized { dst.sink(self); - rem - self.remaining() } - /// Read bytes from the `Buf` into the given slice and advance the cursor by - /// the number of bytes read. - /// Returns the number of bytes read. + /// Copies bytes from the `Buf` into the given slice and advance the cursor by + /// the number of bytes copied. /// /// ``` /// use std::io::Cursor; @@ -46,11 +44,15 @@ pub trait Buf { /// let mut buf = Cursor::new(b"hello world"); /// let mut dst = [0; 5]; /// - /// buf.read_slice(&mut dst); + /// buf.copy_to_slice(&mut dst); /// assert_eq!(b"hello", &dst); /// assert_eq!(6, buf.remaining()); /// ``` - fn read_slice(&mut self, dst: &mut [u8]) { + /// + /// # Panics + /// + /// This function panics if `self.remaining() < dst.len()` + fn copy_to_slice(&mut self, dst: &mut [u8]) { let mut off = 0; assert!(self.remaining() >= dst.len()); @@ -72,7 +74,7 @@ pub trait Buf { } } - /// Reads an unsigned 8 bit integer from the `Buf` without advancing the + /// Gets an unsigned 8 bit integer from the `Buf` without advancing the /// buffer cursor fn peek_u8(&self) -> Option<u8> { if self.has_remaining() { @@ -82,89 +84,89 @@ pub trait Buf { } } - /// Reads an unsigned 8 bit integer from the `Buf`. - fn read_u8(&mut self) -> u8 { + /// Gets an unsigned 8 bit integer from the `Buf`. + fn get_u8(&mut self) -> u8 { let mut buf = [0; 1]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); buf[0] } - /// Reads a signed 8 bit integer from the `Buf`. - fn read_i8(&mut self) -> i8 { + /// Gets a signed 8 bit integer from the `Buf`. 
+ fn get_i8(&mut self) -> i8 { let mut buf = [0; 1]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); buf[0] as i8 } - /// Reads an unsigned 16 bit integer from the `Buf` - fn read_u16<T: ByteOrder>(&mut self) -> u16 { + /// Gets an unsigned 16 bit integer from the `Buf` + fn get_u16<T: ByteOrder>(&mut self) -> u16 { let mut buf = [0; 2]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); T::read_u16(&buf) } - /// Reads a signed 16 bit integer from the `Buf` - fn read_i16<T: ByteOrder>(&mut self) -> i16 { + /// Gets a signed 16 bit integer from the `Buf` + fn get_i16<T: ByteOrder>(&mut self) -> i16 { let mut buf = [0; 2]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); T::read_i16(&buf) } - /// Reads an unsigned 32 bit integer from the `Buf` - fn read_u32<T: ByteOrder>(&mut self) -> u32 { + /// Gets an unsigned 32 bit integer from the `Buf` + fn get_u32<T: ByteOrder>(&mut self) -> u32 { let mut buf = [0; 4]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); T::read_u32(&buf) } - /// Reads a signed 32 bit integer from the `Buf` - fn read_i32<T: ByteOrder>(&mut self) -> i32 { + /// Gets a signed 32 bit integer from the `Buf` + fn get_i32<T: ByteOrder>(&mut self) -> i32 { let mut buf = [0; 4]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); T::read_i32(&buf) } - /// Reads an unsigned 64 bit integer from the `Buf` - fn read_u64<T: ByteOrder>(&mut self) -> u64 { + /// Gets an unsigned 64 bit integer from the `Buf` + fn get_u64<T: ByteOrder>(&mut self) -> u64 { let mut buf = [0; 8]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); T::read_u64(&buf) } - /// Reads a signed 64 bit integer from the `Buf` - fn read_i64<T: ByteOrder>(&mut self) -> i64 { + /// Gets a signed 64 bit integer from the `Buf` + fn get_i64<T: ByteOrder>(&mut self) -> i64 { let mut buf = [0; 8]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); T::read_i64(&buf) } - /// Reads an unsigned n-bytes integer from the `Buf` - fn read_uint<T: ByteOrder>(&mut self, nbytes: usize) -> u64 { + /// Gets an unsigned n-bytes integer from the `Buf` + fn get_uint<T: ByteOrder>(&mut self, nbytes: usize) -> u64 { let mut buf = [0; 8]; - self.read_slice(&mut buf[..nbytes]); + self.copy_to_slice(&mut buf[..nbytes]); T::read_uint(&buf[..nbytes], nbytes) } - /// Reads a signed n-bytes integer from the `Buf` - fn read_int<T: ByteOrder>(&mut self, nbytes: usize) -> i64 { + /// Gets a signed n-bytes integer from the `Buf` + fn get_int<T: ByteOrder>(&mut self, nbytes: usize) -> i64 { let mut buf = [0; 8]; - self.read_slice(&mut buf[..nbytes]); + self.copy_to_slice(&mut buf[..nbytes]); T::read_int(&buf[..nbytes], nbytes) } - /// Reads a IEEE754 single-precision (4 bytes) floating point number from + /// Gets a IEEE754 single-precision (4 bytes) floating point number from /// the `Buf` - fn read_f32<T: ByteOrder>(&mut self) -> f32 { + fn get_f32<T: ByteOrder>(&mut self) -> f32 { let mut buf = [0; 4]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); T::read_f32(&buf) } - /// Reads a IEEE754 double-precision (8 bytes) floating point number from + /// Gets a IEEE754 double-precision (8 bytes) floating point number from /// the `Buf` - fn read_f64<T: ByteOrder>(&mut self) -> f64 { + fn get_f64<T: ByteOrder>(&mut self) -> f64 { let mut buf = [0; 8]; - self.read_slice(&mut buf); + self.copy_to_slice(&mut buf); T::read_f64(&buf) } @@ -175,7 +177,7 @@ pub trait Buf { /// Create an adapter which will limit at most `limit` bytes from it. 
fn take(self, limit: usize) -> Take<Self> where Self: Sized { - Take::new(self, limit) + take::new(self, limit) } /// Return a `Reader` for the value. Allows using a `Buf` as an `io::Read` @@ -185,61 +187,64 @@ pub trait Buf { } /// A trait for values that provide sequential write access to bytes. -pub trait MutBuf { +pub trait BufMut { - /// Returns the number of bytes that can be written to the MutBuf - fn remaining(&self) -> usize; + /// Returns the number of bytes that can be written to the BufMut + fn remaining_mut(&self) -> usize; - /// Advance the internal cursor of the MutBuf - unsafe fn advance(&mut self, cnt: usize); + /// Advance the internal cursor of the BufMut + unsafe fn advance_mut(&mut self, cnt: usize); /// Returns true iff there is any more space for bytes to be written - fn has_remaining(&self) -> bool { - self.remaining() > 0 + fn has_remaining_mut(&self) -> bool { + self.remaining_mut() > 0 } - /// Returns a mutable slice starting at the current MutBuf position and of - /// length between 0 and `MutBuf::remaining()`. + /// Returns a mutable slice starting at the current BufMut position and of + /// length between 0 and `BufMut::remaining()`. /// /// The returned byte slice may represent uninitialized memory. - unsafe fn mut_bytes(&mut self) -> &mut [u8]; + unsafe fn bytes_mut(&mut self) -> &mut [u8]; - fn copy_from<S: Source>(&mut self, src: S) -> usize - where Self: Sized { - let rem = self.remaining(); + /// Copies bytes from `src` into `self` + /// + /// # Panics + /// + /// Panics if `self` does not have enough capacity to copy all the data + /// from `src` + fn copy_from<S: Source>(&mut self, src: S) where Self: Sized { src.source(self); - rem - self.remaining() } - /// Write bytes from the given slice into the `MutBuf` and advance the + /// Copies bytes from the given slice into the `BufMut` and advance the /// cursor by the number of bytes written. /// Returns the number of bytes written. /// /// ``` - /// use bytes::MutBuf; + /// use bytes::BufMut; /// use std::io::Cursor; /// /// let mut dst = [0; 6]; /// /// { /// let mut buf = Cursor::new(&mut dst); - /// buf.write_slice(b"hello"); + /// buf.copy_from_slice(b"hello"); /// - /// assert_eq!(1, buf.remaining()); + /// assert_eq!(1, buf.remaining_mut()); /// } /// /// assert_eq!(b"hello\0", &dst); /// ``` - fn write_slice(&mut self, src: &[u8]) { + fn copy_from_slice(&mut self, src: &[u8]) { let mut off = 0; - assert!(self.remaining() >= src.len(), "buffer overflow"); + assert!(self.remaining_mut() >= src.len(), "buffer overflow"); while off < src.len() { let cnt; unsafe { - let dst = self.mut_bytes(); + let dst = self.bytes_mut(); cnt = cmp::min(dst.len(), src.len() - off); ptr::copy_nonoverlapping( @@ -251,113 +256,119 @@ pub trait MutBuf { } - unsafe { self.advance(cnt); } + unsafe { self.advance_mut(cnt); } } } - fn write_str(&mut self, src: &str) { - self.write_slice(src.as_bytes()); + /// Writes the given string into self. + /// + /// # Panics + /// + /// The function panics if `self` does not have enough remaining capacity + /// to write the full string. + fn put_str(&mut self, src: &str) { + self.copy_from_slice(src.as_bytes()); } - /// Writes an unsigned 8 bit integer to the MutBuf. - fn write_u8(&mut self, n: u8) { - self.write_slice(&[n]) + /// Writes an unsigned 8 bit integer to the BufMut. + fn put_u8(&mut self, n: u8) { + self.copy_from_slice(&[n]) } - /// Writes a signed 8 bit integer to the MutBuf. 
- fn write_i8(&mut self, n: i8) { - self.write_slice(&[n as u8]) + /// Writes a signed 8 bit integer to the BufMut. + fn put_i8(&mut self, n: i8) { + self.copy_from_slice(&[n as u8]) } - /// Writes an unsigned 16 bit integer to the MutBuf. - fn write_u16<T: ByteOrder>(&mut self, n: u16) { + /// Writes an unsigned 16 bit integer to the BufMut. + fn put_u16<T: ByteOrder>(&mut self, n: u16) { let mut buf = [0; 2]; T::write_u16(&mut buf, n); - self.write_slice(&buf) + self.copy_from_slice(&buf) } - /// Writes a signed 16 bit integer to the MutBuf. - fn write_i16<T: ByteOrder>(&mut self, n: i16) { + /// Writes a signed 16 bit integer to the BufMut. + fn put_i16<T: ByteOrder>(&mut self, n: i16) { let mut buf = [0; 2]; T::write_i16(&mut buf, n); - self.write_slice(&buf) + self.copy_from_slice(&buf) } - /// Writes an unsigned 32 bit integer to the MutBuf. - fn write_u32<T: ByteOrder>(&mut self, n: u32) { + /// Writes an unsigned 32 bit integer to the BufMut. + fn put_u32<T: ByteOrder>(&mut self, n: u32) { let mut buf = [0; 4]; T::write_u32(&mut buf, n); - self.write_slice(&buf) + self.copy_from_slice(&buf) } - /// Writes a signed 32 bit integer to the MutBuf. - fn write_i32<T: ByteOrder>(&mut self, n: i32) { + /// Writes a signed 32 bit integer to the BufMut. + fn put_i32<T: ByteOrder>(&mut self, n: i32) { let mut buf = [0; 4]; T::write_i32(&mut buf, n); - self.write_slice(&buf) + self.copy_from_slice(&buf) } - /// Writes an unsigned 64 bit integer to the MutBuf. - fn write_u64<T: ByteOrder>(&mut self, n: u64) { + /// Writes an unsigned 64 bit integer to the BufMut. + fn put_u64<T: ByteOrder>(&mut self, n: u64) { let mut buf = [0; 8]; T::write_u64(&mut buf, n); - self.write_slice(&buf) + self.copy_from_slice(&buf) } - /// Writes a signed 64 bit integer to the MutBuf. - fn write_i64<T: ByteOrder>(&mut self, n: i64) { + /// Writes a signed 64 bit integer to the BufMut. + fn put_i64<T: ByteOrder>(&mut self, n: i64) { let mut buf = [0; 8]; T::write_i64(&mut buf, n); - self.write_slice(&buf) + self.copy_from_slice(&buf) } - /// Writes an unsigned n-bytes integer to the MutBuf. + /// Writes an unsigned n-bytes integer to the BufMut. /// /// If the given integer is not representable in the given number of bytes, /// this method panics. If `nbytes > 8`, this method panics. - fn write_uint<T: ByteOrder>(&mut self, n: u64, nbytes: usize) { + fn put_uint<T: ByteOrder>(&mut self, n: u64, nbytes: usize) { let mut buf = [0; 8]; T::write_uint(&mut buf, n, nbytes); - self.write_slice(&buf[0..nbytes]) + self.copy_from_slice(&buf[0..nbytes]) } - /// Writes a signed n-bytes integer to the MutBuf. + /// Writes a signed n-bytes integer to the BufMut. /// /// If the given integer is not representable in the given number of bytes, /// this method panics. If `nbytes > 8`, this method panics. - fn write_int<T: ByteOrder>(&mut self, n: i64, nbytes: usize) { + fn put_int<T: ByteOrder>(&mut self, n: i64, nbytes: usize) { let mut buf = [0; 8]; T::write_int(&mut buf, n, nbytes); - self.write_slice(&buf[0..nbytes]) + self.copy_from_slice(&buf[0..nbytes]) } /// Writes a IEEE754 single-precision (4 bytes) floating point number to - /// the MutBuf. - fn write_f32<T: ByteOrder>(&mut self, n: f32) { + /// the BufMut. + fn put_f32<T: ByteOrder>(&mut self, n: f32) { let mut buf = [0; 4]; T::write_f32(&mut buf, n); - self.write_slice(&buf) + self.copy_from_slice(&buf) } /// Writes a IEEE754 double-precision (8 bytes) floating point number to - /// the MutBuf. - fn write_f64<T: ByteOrder>(&mut self, n: f64) { + /// the BufMut. 
+ fn put_f64<T: ByteOrder>(&mut self, n: f64) { let mut buf = [0; 8]; T::write_f64(&mut buf, n); - self.write_slice(&buf) + self.copy_from_slice(&buf) } - /// Creates a "by reference" adaptor for this instance of MutBuf + /// Creates a "by reference" adaptor for this instance of BufMut fn by_ref(&mut self) -> &mut Self where Self: Sized { self } /// Create an adapter which will limit at most `limit` bytes from it. - fn take(self, limit: usize) -> Take<Self> where Self: Sized { - Take::new(self, limit) + fn take_mut(self, limit: usize) -> TakeMut<Self> where Self: Sized { + take::new_mut(self, limit) } - /// Return a `Write` for the value. Allows using a `MutBuf` as an + /// Return a `Write` for the value. Allows using a `BufMut` as an /// `io::Write` fn writer(self) -> Writer<Self> where Self: Sized { Writer::new(self) @@ -376,8 +387,10 @@ pub trait MutBuf { /// on the types themselves. For example, `IntoBuf` is implemented for `&'a /// Vec<u8>` and not `Vec<u8>` directly. pub trait IntoBuf { + /// The `Buf` type that `self` is being converted into type Buf: Buf; + /// Creates a `Buf` from a value. fn into_buf(self) -> Self::Buf; } @@ -439,45 +452,47 @@ impl<'a> IntoBuf for &'a () { */ -/// A value that writes bytes from itself into a `MutBuf`. +/// A value that writes bytes from itself into a `BufMut`. pub trait Source { /// Copy data from self into destination buffer - fn source<B: MutBuf>(self, buf: &mut B); + fn source<B: BufMut>(self, buf: &mut B); } impl<'a> Source for &'a [u8] { - fn source<B: MutBuf>(self, buf: &mut B) { - buf.write_slice(self); + fn source<B: BufMut>(self, buf: &mut B) { + buf.copy_from_slice(self); } } impl Source for u8 { - fn source<B: MutBuf>(self, buf: &mut B) { + fn source<B: BufMut>(self, buf: &mut B) { let src = [self]; - buf.write_slice(&src); + buf.copy_from_slice(&src); } } impl Source for Bytes { - fn source<B: MutBuf>(self, buf: &mut B) { - Source::source(&self, buf); + fn source<B: BufMut>(self, buf: &mut B) { + Source::source(self.as_ref(), buf); } } impl<'a> Source for &'a Bytes { - fn source<B: MutBuf>(self, buf: &mut B) { - Source::source(&mut self.buf(), buf); + fn source<B: BufMut>(self, buf: &mut B) { + Source::source(self.as_ref(), buf); } } impl<'a, T: Buf> Source for &'a mut T { - fn source<B: MutBuf>(mut self, buf: &mut B) { - while self.has_remaining() && buf.has_remaining() { + fn source<B: BufMut>(mut self, buf: &mut B) { + assert!(buf.remaining_mut() >= self.remaining()); + + while self.has_remaining() { let l; unsafe { let s = self.bytes(); - let d = buf.mut_bytes(); + let d = buf.bytes_mut(); l = cmp::min(s.len(), d.len()); ptr::copy_nonoverlapping( @@ -487,22 +502,24 @@ impl<'a, T: Buf> Source for &'a mut T { } self.advance(l); - unsafe { buf.advance(l); } + unsafe { buf.advance_mut(l); } } } } +/// A value that copies bytes from a `Buf` into itself pub trait Sink { + /// Copy bytes from `buf` into `self` fn sink<B: Buf>(&mut self, buf: &mut B); } impl Sink for [u8] { fn sink<B: Buf>(&mut self, buf: &mut B) { - buf.read_slice(self); + buf.copy_to_slice(self); } } -impl<T: MutBuf> Sink for T { +impl<T: BufMut> Sink for T { fn sink<B: Buf>(&mut self, buf: &mut B) { Source::source(buf, self) } @@ -550,32 +567,12 @@ impl<B: Buf + Sized> io::Read for Reader<B> { } } -/// Buffer related extension for `io::Read` -pub trait ReadExt { - fn read_buf<B: MutBuf>(&mut self, buf: &mut B) -> io::Result<usize>; -} - -impl<T: io::Read> ReadExt for T { - fn read_buf<B: MutBuf>(&mut self, buf: &mut B) -> io::Result<usize> { - if !buf.has_remaining() { 
- return Ok(0); - } - - unsafe { - let i = try!(self.read(buf.mut_bytes())); - - buf.advance(i); - Ok(i) - } - } -} - -/// Adapts a `MutBuf` to the `io::Write` trait +/// Adapts a `BufMut` to the `io::Write` trait pub struct Writer<B> { buf: B, } -impl<B: MutBuf> Writer<B> { +impl<B: BufMut> Writer<B> { /// Return a `Writer` for teh given `buf` pub fn new(buf: B) -> Writer<B> { Writer { buf: buf } @@ -591,15 +588,15 @@ impl<B: MutBuf> Writer<B> { &mut self.buf } - /// Unwraps this `Writer`, returning the underlying `MutBuf` + /// Unwraps this `Writer`, returning the underlying `BufMut` pub fn into_inner(self) -> B { self.buf } } -impl<B: MutBuf + Sized> io::Write for Writer<B> { +impl<B: BufMut + Sized> io::Write for Writer<B> { fn write(&mut self, src: &[u8]) -> io::Result<usize> { - let n = cmp::min(self.buf.remaining(), src.len()); + let n = cmp::min(self.buf.remaining_mut(), src.len()); self.buf.copy_from(&src[0..n]); Ok(n) @@ -610,29 +607,40 @@ impl<B: MutBuf + Sized> io::Write for Writer<B> { } } -/// Buffer related extension for `io::Write` -pub trait WriteExt { - fn write_buf<B: Buf>(&mut self, buf: &mut B) -> io::Result<usize>; -} - -impl<T: io::Write> WriteExt for T { - fn write_buf<B: Buf>(&mut self, buf: &mut B) -> io::Result<usize> { - if !buf.has_remaining() { - return Ok(0); - } - - let i = try!(self.write(buf.bytes())); - buf.advance(i); - Ok(i) - } -} - /* * * ===== Buf impls ===== * */ +impl<'a, T: Buf> Buf for &'a mut T { + fn remaining(&self) -> usize { + (**self).remaining() + } + + fn bytes(&self) -> &[u8] { + (**self).bytes() + } + + fn advance(&mut self, cnt: usize) { + (**self).advance(cnt) + } +} + +impl<'a, T: BufMut> BufMut for &'a mut T { + fn remaining_mut(&self) -> usize { + (**self).remaining_mut() + } + + unsafe fn bytes_mut(&mut self) -> &mut [u8] { + (**self).bytes_mut() + } + + unsafe fn advance_mut(&mut self, cnt: usize) { + (**self).advance_mut(cnt) + } +} + impl<T: AsRef<[u8]>> Buf for io::Cursor<T> { fn remaining(&self) -> usize { let len = self.get_ref().as_ref().len(); @@ -657,35 +665,34 @@ impl<T: AsRef<[u8]>> Buf for io::Cursor<T> { } } -impl<T: AsMut<[u8]> + AsRef<[u8]>> MutBuf for io::Cursor<T> { - - fn remaining(&self) -> usize { - Buf::remaining(self) +impl<T: AsMut<[u8]> + AsRef<[u8]>> BufMut for io::Cursor<T> { + fn remaining_mut(&self) -> usize { + self.remaining() } - /// Advance the internal cursor of the MutBuf - unsafe fn advance(&mut self, cnt: usize) { + /// Advance the internal cursor of the BufMut + unsafe fn advance_mut(&mut self, cnt: usize) { let pos = self.position() as usize; let pos = cmp::min(self.get_mut().as_mut().len(), pos + cnt); self.set_position(pos as u64); } - /// Returns a mutable slice starting at the current MutBuf position and of - /// length between 0 and `MutBuf::remaining()`. + /// Returns a mutable slice starting at the current BufMut position and of + /// length between 0 and `BufMut::remaining()`. /// /// The returned byte slice may represent uninitialized memory. - unsafe fn mut_bytes(&mut self) -> &mut [u8] { + unsafe fn bytes_mut(&mut self) -> &mut [u8] { let pos = self.position() as usize; &mut (self.get_mut().as_mut())[pos..] 
} } -impl MutBuf for Vec<u8> { - fn remaining(&self) -> usize { +impl BufMut for Vec<u8> { + fn remaining_mut(&self) -> usize { usize::MAX - self.len() } - unsafe fn advance(&mut self, cnt: usize) { + unsafe fn advance_mut(&mut self, cnt: usize) { let len = self.len() + cnt; if len > self.capacity() { @@ -698,7 +705,7 @@ impl MutBuf for Vec<u8> { self.set_len(len); } - unsafe fn mut_bytes(&mut self) -> &mut [u8] { + unsafe fn bytes_mut(&mut self) -> &mut [u8] { use std::slice; if self.capacity() == self.len() { @@ -712,22 +719,3 @@ impl MutBuf for Vec<u8> { &mut slice::from_raw_parts_mut(ptr, cap)[len..] } } - -/* - * - * ===== fmt impls ===== - * - */ - -pub struct Fmt<'a, B: 'a>(pub &'a mut B); - -impl<'a, B: MutBuf> fmt::Write for Fmt<'a, B> { - fn write_str(&mut self, s: &str) -> fmt::Result { - self.0.write_str(s); - Ok(()) - } - - fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { - fmt::write(self, args) - } -} diff --git a/src/imp/buf/slice.rs b/src/buf/slice.rs similarity index 67% rename from src/imp/buf/slice.rs rename to src/buf/slice.rs index b304de4..eee4e14 100644 --- a/src/imp/buf/slice.rs +++ b/src/buf/slice.rs @@ -1,7 +1,6 @@ //! A buffer backed by a contiguous region of memory. -use {Buf, MutBuf}; -use imp::alloc; +use {Buf, BufMut}; use std::fmt; /* @@ -23,24 +22,6 @@ pub struct SliceBuf<T = Box<[u8]>> { wr: usize, } -impl SliceBuf { - /// Constructs a new, empty `SliceBuf` with the specified capacity - /// - /// The `SliceBuf` will be backed by a `Box<[u8]>`. - pub fn with_capacity(capacity: usize) -> SliceBuf { - let mem = unsafe { alloc::with_capacity(capacity) }; - SliceBuf::new(mem) - } - - /// Create a new `SliceBuf` and copy the contents of the given slice into - /// it. - pub fn from_slice<T: AsRef<[u8]>>(bytes: &T) -> SliceBuf { - let mut buf = SliceBuf::with_capacity(bytes.as_ref().len()); - buf.write_slice(bytes.as_ref()); - buf - } -} - impl<T: AsRef<[u8]>> SliceBuf<T> { /// Creates a new `SliceBuf` wrapping the provided slice pub fn new(mem: T) -> SliceBuf<T> { @@ -81,24 +62,13 @@ impl<T: AsRef<[u8]>> SliceBuf<T> { pub fn clear(&mut self) { self.rd = 0; self.wr = 0; - } - - /// Return the number of bytes left to read - pub fn remaining_read(&self) -> usize { - self.wr - self.rd - } - - /// Return the remaining write capacity - pub fn remaining_write(&self) -> usize { - self.capacity() - self.wr - } -} + }} impl<T> Buf for SliceBuf<T> where T: AsRef<[u8]>, { fn remaining(&self) -> usize { - self.remaining_read() + self.wr - self.rd } fn bytes(&self) -> &[u8] { @@ -110,7 +80,7 @@ impl<T> Buf for SliceBuf<T> self.rd += cnt; } - fn read_slice(&mut self, dst: &mut [u8]) { + fn copy_to_slice(&mut self, dst: &mut [u8]) { assert!(self.remaining() >= dst.len()); let len = dst.len(); @@ -119,23 +89,25 @@ impl<T> Buf for SliceBuf<T> } } -impl<T> MutBuf for SliceBuf<T> +impl<T> BufMut for SliceBuf<T> where T: AsRef<[u8]> + AsMut<[u8]>, { - fn remaining(&self) -> usize { - self.remaining_write() + fn remaining_mut(&self) -> usize { + self.capacity() - self.wr } - unsafe fn advance(&mut self, cnt: usize) { - assert!(cnt <= self.remaining_write()); + unsafe fn advance_mut(&mut self, cnt: usize) { + assert!(cnt <= self.remaining_mut()); self.wr += cnt; } - unsafe fn mut_bytes(&mut self) -> &mut [u8] { + unsafe fn bytes_mut(&mut self) -> &mut [u8] { &mut self.mem.as_mut()[self.wr..] 
} - fn write_slice(&mut self, src: &[u8]) { + fn copy_from_slice(&mut self, src: &[u8]) { + assert!(self.remaining_mut() >= src.len()); + let wr = self.wr; self.mem.as_mut()[wr..wr+src.len()] @@ -152,3 +124,16 @@ impl<T> fmt::Debug for SliceBuf<T> self.bytes().fmt(fmt) } } + +impl<T> fmt::Write for SliceBuf<T> + where T: AsRef<[u8]> + AsMut<[u8]> +{ + fn write_str(&mut self, s: &str) -> fmt::Result { + BufMut::put_str(self, s); + Ok(()) + } + + fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { + fmt::write(self, args) + } +} diff --git a/src/buf/take.rs b/src/buf/take.rs new file mode 100644 index 0000000..f92a993 --- /dev/null +++ b/src/buf/take.rs @@ -0,0 +1,182 @@ +use {Buf, BufMut}; +use std::{cmp, fmt}; + +/// A buffer adapter which limits the bytes read from an underlying value. +#[derive(Debug)] +pub struct Take<T> { + inner: T, + limit: usize, +} + +/// A buffer adapter which limits the bytes written from an underlying value. +#[derive(Debug)] +pub struct TakeMut<T> { + inner: T, + limit: usize, +} + +pub fn new<T>(inner: T, limit: usize) -> Take<T> { + Take { + inner: inner, + limit: limit, + } +} + +pub fn new_mut<T>(inner: T, limit: usize) -> TakeMut<T> { + TakeMut { + inner: inner, + limit: limit, + } +} + +/* + * + * ===== impl Take ===== + * + */ + +impl<T> Take<T> { + /// Consumes this `Take`, returning the underlying value. + pub fn into_inner(self) -> T { + self.inner + } + + /// Gets a reference to the underlying value in this `Take`. + pub fn get_ref(&self) -> &T { + &self.inner + } + + /// Gets a mutable reference to the underlying value in this `Take`. + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Returns the maximum number of bytes that are made available from the + /// underlying value. + pub fn limit(&self) -> usize { + self.limit + } + + /// Sets the maximum number of bytes that are made available from the + /// underlying value. + pub fn set_limit(&mut self, lim: usize) { + self.limit = lim + } +} + +impl<T: Buf> Buf for Take<T> { + fn remaining(&self) -> usize { + cmp::min(self.inner.remaining(), self.limit) + } + + fn bytes(&self) -> &[u8] { + &self.inner.bytes()[..self.limit] + } + + fn advance(&mut self, cnt: usize) { + let cnt = cmp::min(cnt, self.limit); + self.limit -= cnt; + self.inner.advance(cnt); + } +} + +impl<T: BufMut> BufMut for Take<T> { + fn remaining_mut(&self) -> usize { + self.inner.remaining_mut() + } + + unsafe fn bytes_mut(&mut self) -> &mut [u8] { + self.inner.bytes_mut() + } + + unsafe fn advance_mut(&mut self, cnt: usize) { + self.inner.advance_mut(cnt) + } +} + +impl<T: BufMut> fmt::Write for Take<T> { + fn write_str(&mut self, s: &str) -> fmt::Result { + BufMut::put_str(self, s); + Ok(()) + } + + fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { + fmt::write(self, args) + } +} + +/* + * + * ===== impl TakeMut ===== + * + */ + +impl<T> TakeMut<T> { + /// Consumes this `TakeMut`, returning the underlying value. + pub fn into_inner(self) -> T { + self.inner + } + + /// Gets a reference to the underlying value in this `TakeMut`. + pub fn get_ref(&self) -> &T { + &self.inner + } + + /// Gets a mutable reference to the underlying value in this `TakeMut`. + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Returns the maximum number of bytes that are made available from the + /// underlying value. + pub fn limit(&self) -> usize { + self.limit + } + + /// Sets the maximum number of bytes that are made available from the + /// underlying value. 
+ pub fn set_limit(&mut self, lim: usize) { + self.limit = lim + } +} + +impl<T: Buf> Buf for TakeMut<T> { + fn remaining(&self) -> usize { + self.inner.remaining() + } + + fn bytes(&self) -> &[u8] { + self.inner.bytes() + } + + fn advance(&mut self, cnt: usize) { + self.inner.advance(cnt) + } +} + +impl<T: BufMut> BufMut for TakeMut<T> { + fn remaining_mut(&self) -> usize { + cmp::min(self.inner.remaining_mut(), self.limit) + } + + unsafe fn bytes_mut(&mut self) -> &mut [u8] { + &mut self.inner.bytes_mut()[..self.limit] + } + + unsafe fn advance_mut(&mut self, cnt: usize) { + let cnt = cmp::min(cnt, self.limit); + self.limit -= cnt; + self.inner.advance_mut(cnt); + } +} + +impl<T: BufMut> fmt::Write for TakeMut<T> { + fn write_str(&mut self, s: &str) -> fmt::Result { + BufMut::put_str(self, s); + Ok(()) + } + + fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { + fmt::write(self, args) + } +} diff --git a/src/bytes.rs b/src/bytes.rs new file mode 100644 index 0000000..9a25688 --- /dev/null +++ b/src/bytes.rs @@ -0,0 +1,522 @@ +use ByteBuf; + +use std::cell::UnsafeCell; +use std::sync::Arc; +use std::{cmp, fmt, ops}; + +/// A reference counted slice of bytes. +/// +/// A `Bytes` is an immutable sequence of bytes. Given that it is guaranteed to +/// be immutable, `Bytes` is `Sync`, `Clone` is shallow (ref count increment), +/// and all operations only update views into the underlying data without +/// requiring any copies. +#[derive(Eq)] +pub struct Bytes { + inner: BytesMut, +} + +/// A unique reference to a slice of bytes. +/// +/// A `BytesMut` is a unique handle to a slice of bytes allowing mutation of +/// the underlying bytes. +pub struct BytesMut { + mem: Mem, + pos: usize, + len: usize, + cap: usize, +} + +struct Mem { + inner: Arc<UnsafeCell<Box<[u8]>>>, +} + +/* + * + * ===== Bytes ===== + * + */ + +impl Bytes { + /// Creates a new `Bytes` and copy the given slice into it. + pub fn from_slice<T: AsRef<[u8]>>(bytes: T) -> Bytes { + BytesMut::from_slice(bytes).freeze() + } + + /// Returns the number of bytes contained in this `Bytes`. + pub fn len(&self) -> usize { + self.inner.len() + } + + /// Returns true if the value contains no bytes + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Returns the inner contents of this `Bytes` as a slice. + pub fn as_slice(&self) -> &[u8] { + self.as_ref() + } + + /// Extracts a new `Bytes` referencing the bytes from range [start, end). + pub fn slice(&self, start: usize, end: usize) -> Bytes { + let mut ret = self.clone(); + + ret.inner + .set_end(end) + .set_start(start); + + ret + } + + /// Extracts a new `Bytes` referencing the bytes from range [start, len). + pub fn slice_from(&self, start: usize) -> Bytes { + self.slice(start, self.len()) + } + + /// Extracts a new `Bytes` referencing the bytes from range [0, end). + pub fn slice_to(&self, end: usize) -> Bytes { + self.slice(0, end) + } + + /// Splits the bytes into two at the given index. + /// + /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes` + /// contains elements `[at, len)`. + /// + /// This is an O(1) operation that just increases the reference count and + /// sets a few indexes. + /// + /// # Panics + /// + /// Panics if `at > len` + pub fn split_off(&mut self, at: usize) -> Bytes { + self.inner.split_off(at).freeze() + } + + /// Splits the buffer into two at the given index. + /// + /// Afterwards `self` contains elements `[at, len)`, and the returned + /// `Bytes` contains elements `[0, at)`. 
+ /// + /// This is an O(1) operation that just increases the reference count and + /// sets a few indexes. + /// + /// # Panics + /// + /// Panics if `at > len` + pub fn drain_to(&mut self, at: usize) -> Bytes { + self.inner.drain_to(at).freeze() + } + + /// Attempt to convert into a `BytesMut` handle. + /// + /// This will only succeed if there are no other outstanding references to + /// the underlying chunk of memory. + pub fn try_mut(mut self) -> Result<BytesMut, Bytes> { + if self.inner.mem.is_mut_safe() { + Ok(self.inner) + } else { + Err(self) + } + } + + /// Consumes handle, returning a new mutable handle + /// + /// The function attempts to avoid copying, however if it is unable to + /// obtain a unique reference to the underlying data, a new buffer is + /// allocated and the data is copied to it. + pub fn into_mut(self) -> BytesMut { + self.try_mut().unwrap_or_else(BytesMut::from_slice) + } +} + +impl Clone for Bytes { + fn clone(&self) -> Bytes { + Bytes { inner: self.inner.clone() } + } +} + +impl AsRef<[u8]> for Bytes { + fn as_ref(&self) -> &[u8] { + self.inner.as_ref() + } +} + +impl ops::Deref for Bytes { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + self.as_ref() + } +} + +impl From<Vec<u8>> for Bytes { + fn from(src: Vec<u8>) -> Bytes { + BytesMut::from(src).freeze() + } +} + +impl<'a> From<&'a [u8]> for Bytes { + fn from(src: &'a [u8]) -> Bytes { + BytesMut::from(src).freeze() + } +} + +impl PartialEq for Bytes { + fn eq(&self, other: &Bytes) -> bool { + self.inner == other.inner + } +} + +impl fmt::Debug for Bytes { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.inner, fmt) + } +} + +unsafe impl Sync for Bytes {} + +/* + * + * ===== BytesMut ===== + * + */ + +impl BytesMut { + /// Create a new `BytesMut` with the specified capacity. + pub fn with_capacity(cap: usize) -> BytesMut { + BytesMut { + mem: Mem::with_capacity(cap), + pos: 0, + len: 0, + cap: cap, + } + } + + /// Creates a new `BytesMut` and copy the given slice into it. + pub fn from_slice<T: AsRef<[u8]>>(bytes: T) -> BytesMut { + let buf = ByteBuf::from_slice(bytes); + buf.into_inner() + } + + /// Returns the number of bytes contained in this `BytesMut`. + pub fn len(&self) -> usize { + self.len + } + + /// Returns true if the value contains no bytes + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the total byte capacity of this `BytesMut` + pub fn capacity(&self) -> usize { + self.cap + } + + /// Return an immutable handle to the bytes + pub fn freeze(self) -> Bytes { + Bytes { inner: self } + } + + /// Splits the bytes into two at the given index. + /// + /// Afterwards `self` contains elements `[0, at)`, and the returned + /// `BytesMut` contains elements `[at, capacity)`. + /// + /// This is an O(1) operation that just increases the reference count and + /// sets a few indexes. + /// + /// # Panics + /// + /// Panics if `at > capacity` + pub fn split_off(&mut self, at: usize) -> BytesMut { + let mut other = self.clone(); + + other.set_start(at); + self.set_end(at); + + return other + } + + /// Splits the buffer into two at the given index. + /// + /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut` + /// contains elements `[0, at)`. + /// + /// This is an O(1) operation that just increases the reference count and + /// sets a few indexes. 
+ /// + /// # Panics + /// + /// Panics if `at > len` + pub fn drain_to(&mut self, at: usize) -> BytesMut { + let mut other = self.clone(); + + other.set_end(at); + self.set_start(at); + + return other + } + + /// Returns the inner contents of this `BytesMut` as a slice. + pub fn as_slice(&self) -> &[u8] { + self.as_ref() + } + + /// Returns the inner contents of this `BytesMut` as a mutable slice + /// + /// This a slice of bytes that have been initialized + pub fn as_mut(&mut self) -> &mut [u8] { + let end = self.pos + self.len; + &mut self.mem.as_mut()[self.pos..end] + } + + /// Sets the length of the buffer + /// + /// This will explicitly set the size of the buffer without actually + /// modifying the data, so it is up to the caller to ensure that the data + /// has been initialized. + /// + /// # Panics + /// + /// This method will panic if `len` is out of bounds for the underlying + /// slice or if it comes after the `end` of the configured window. + pub unsafe fn set_len(&mut self, len: usize) { + assert!(len <= self.cap); + self.len = len; + } + + /// Returns the inner contents of this `BytesMut` as a mutable slice + /// + /// This a slice of all bytes, including uninitialized memory + pub unsafe fn as_raw(&mut self) -> &mut [u8] { + let end = self.pos + self.cap; + &mut self.mem.as_mut()[self.pos..end] + } + + /// Changes the starting index of this window to the index specified. + /// + /// Returns the windows back to chain multiple calls to this method. + /// + /// # Panics + /// + /// This method will panic if `start` is out of bounds for the underlying + /// slice. + fn set_start(&mut self, start: usize) -> &mut BytesMut { + assert!(start <= self.cap); + self.pos += start; + + if self.len >= start { + self.len -= start; + } else { + self.len = 0; + } + + self.cap -= start; + self + } + + /// Changes the end index of this window to the index specified. + /// + /// Returns the windows back to chain multiple calls to this method. + /// + /// # Panics + /// + /// This method will panic if `start` is out of bounds for the underlying + /// slice. + fn set_end(&mut self, end: usize) -> &mut BytesMut { + assert!(end <= self.cap); + self.cap = end; + self.len = cmp::min(self.len, end); + self + } + + /// Increments the ref count. This should only be done if it is known that + /// it can be done safely. As such, this fn is not public, instead other + /// fns will use this one while maintaining the guarantees. + fn clone(&self) -> BytesMut { + BytesMut { + mem: self.mem.clone(), + .. 
*self + } + } +} + +impl AsRef<[u8]> for BytesMut { + fn as_ref(&self) -> &[u8] { + let end = self.pos + self.len; + &self.mem.as_ref()[self.pos..end] + } +} + +impl ops::Deref for BytesMut { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + self.as_ref() + } +} + +impl ops::DerefMut for BytesMut { + fn deref_mut(&mut self) -> &mut [u8] { + self.as_mut() + } +} + +impl From<Vec<u8>> for BytesMut { + fn from(src: Vec<u8>) -> BytesMut { + let len = src.len(); + let cap = src.capacity(); + + BytesMut { + mem: Mem::from_vec(src), + pos: 0, + len: len, + cap: cap, + } + } +} + +impl<'a> From<&'a [u8]> for BytesMut { + fn from(src: &'a [u8]) -> BytesMut { + BytesMut::from_slice(src) + } +} + +impl PartialEq for BytesMut { + fn eq(&self, other: &BytesMut) -> bool { + **self == **other + } +} + +impl Eq for BytesMut { +} + +impl fmt::Debug for BytesMut { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self.as_ref(), fmt) + } +} + +unsafe impl Send for BytesMut {} + +/* + * + * ===== Mem ===== + * + */ + +impl Mem { + fn with_capacity(cap: usize) -> Mem { + let mut vec = Vec::with_capacity(cap); + unsafe { vec.set_len(cap); } + + Mem { inner: Arc::new(UnsafeCell::new(vec.into_boxed_slice())) } + } + + fn from_vec(mut vec: Vec<u8>) -> Mem { + let cap = vec.capacity(); + unsafe { vec.set_len(cap); } + + Mem { inner: Arc::new(UnsafeCell::new(vec.into_boxed_slice())) } + } + + fn as_ref(&self) -> &[u8] { + unsafe { &*self.inner.get() } + } + + fn as_mut(&mut self) -> &mut [u8] { + unsafe { &mut *self.inner.get() } + } + + fn is_mut_safe(&mut self) -> bool { + Arc::get_mut(&mut self.inner).is_some() + } + + fn clone(&self) -> Mem { + Mem { inner: self.inner.clone() } + } +} + +/* + * + * ===== PartialEq ===== + * + */ + +impl PartialEq<[u8]> for BytesMut { + fn eq(&self, other: &[u8]) -> bool { + &**self == other + } +} + +impl PartialEq<BytesMut> for [u8] { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialEq<Vec<u8>> for BytesMut { + fn eq(&self, other: &Vec<u8>) -> bool { + *self == &other[..] + } +} + +impl PartialEq<BytesMut> for Vec<u8> { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut + where BytesMut: PartialEq<T> +{ + fn eq(&self, other: &&'a T) -> bool { + *self == **other + } +} + +impl<'a> PartialEq<BytesMut> for &'a [u8] { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialEq<[u8]> for Bytes { + fn eq(&self, other: &[u8]) -> bool { + self.inner == *other + } +} + +impl PartialEq<Bytes> for [u8] { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialEq<Vec<u8>> for Bytes { + fn eq(&self, other: &Vec<u8>) -> bool { + *self == &other[..] + } +} + +impl PartialEq<Bytes> for Vec<u8> { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl<'a> PartialEq<Bytes> for &'a [u8] { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes + where Bytes: PartialEq<T> +{ + fn eq(&self, other: &&'a T) -> bool { + *self == **other + } +} diff --git a/src/imp/alloc.rs b/src/imp/alloc.rs deleted file mode 100644 index fe38c7b..0000000 --- a/src/imp/alloc.rs +++ /dev/null @@ -1,68 +0,0 @@ -#![allow(warnings)] - -use std::sync::Arc; - -/// A sequential chunk of memory that is atomically reference counted. 
-pub struct Mem { - mem: Arc<Box<[u8]>>, -} - -pub unsafe fn with_capacity(mut capacity: usize) -> Box<[u8]> { - // Round up to the next power of two - capacity = capacity.next_power_of_two(); - - let mut v: Vec<u8> = Vec::with_capacity(capacity); - v.set_len(capacity); - v.into_boxed_slice() -} - -impl Mem { - /// Return a new `Mem` with the given capacity - pub unsafe fn with_capacity(capacity: usize) -> Mem { - let mem = Arc::new(with_capacity(capacity)); - Mem { mem: mem } - } - - pub unsafe fn from_boxed(src: Arc<Box<[u8]>>) -> Mem { - Mem { mem: src } - } - - /// Returns the length in bytes - pub fn len(&self) -> usize { - self.mem.len() - } - - /// View of the underlying memory. - /// - /// The memory could be uninitialized. - pub unsafe fn bytes(&self) -> &[u8] { - &*self.mem - } - - /// View of a range of the underlying memory. - /// - /// The offsets are not checked and the memory could be uninitialized. - pub unsafe fn slice(&self, start: usize, end: usize) -> &[u8] { - use std::slice; - let ptr = self.mem.as_ptr().offset(start as isize); - slice::from_raw_parts(ptr, end - start) - } - - /// Mutable view of the underlying memory. - /// - /// The memory could be uninitialized. - pub unsafe fn mut_bytes(&mut self) -> &mut [u8] { - use std::slice; - let len = self.mem.len(); - slice::from_raw_parts_mut(self.mem.as_ptr() as *mut u8, len) - } - - /// Mutable view of a range of the underlying memory. - /// - /// The offsets are not checked and the memory could be uninitialized. - pub unsafe fn mut_bytes_slice(&mut self, start: usize, end: usize) -> &mut [u8] { - use std::slice; - let ptr = self.mem.as_ptr().offset(start as isize); - slice::from_raw_parts_mut(ptr as *mut u8, end - start) - } -} diff --git a/src/imp/buf/append.rs b/src/imp/buf/append.rs deleted file mode 100644 index d671f18..0000000 --- a/src/imp/buf/append.rs +++ /dev/null @@ -1,122 +0,0 @@ -use {alloc, MutBuf, Bytes}; -use std::cell::Cell; - -/// A `Buf` backed by a contiguous region of memory. -/// -/// This buffer can only be written to once. Byte strings (immutable views) can -/// be created at any time, not just when the writing is complete. 
-pub struct AppendBuf { - mem: alloc::MemRef, - rd: Cell<u32>, // Read cursor - wr: u32, // Write cursor - cap: u32, -} - -impl AppendBuf { - pub fn with_capacity(mut capacity: u32) -> AppendBuf { - // Round the capacity to the closest power of 2 - capacity = capacity.next_power_of_two(); - - unsafe { - // Allocate the memory - let mem = alloc::heap(capacity as usize); - - AppendBuf::from_mem_ref(mem, capacity, 0) - } - } - - pub unsafe fn from_mem_ref(mem: alloc::MemRef, cap: u32, pos: u32) -> AppendBuf { - AppendBuf { - mem: mem, - rd: Cell::new(pos), - wr: pos, - cap: cap, - } - } - - #[inline] - pub fn len(&self) -> usize { - (self.wr - self.rd.get()) as usize - } - - #[inline] - pub fn capacity(&self) -> usize { - (self.cap - self.rd.get()) as usize - } - - pub fn bytes(&self) -> &[u8] { - let rd = self.rd.get() as usize; - let wr = self.wr as usize; - unsafe { &self.mem.bytes_slice(rd, wr) } - } - - pub fn shift(&self, n: usize) -> Bytes { - let ret = self.slice(0, n); - self.rd.set(self.rd.get() + ret.len() as u32); - assert!(self.rd.get() <= self.wr, "buffer overflow"); - ret - } - - pub fn drop(&self, n: usize) { - assert!(n <= self.len()); - self.rd.set(self.rd.get() + n as u32); - } - - pub fn slice(&self, begin: usize, end: usize) -> Bytes { - // TODO: Fix overflow potential - - let rd = self.rd.get(); - let wr = self.wr; - - let begin = begin as u32 + rd; - let end = end as u32 + rd; - - assert!(begin <= end && end <= wr, "invalid range"); - - Bytes::from_boxed(self.mem.get_ref().clone(), begin as usize, (end - begin) as usize) - } -} - -impl MutBuf for AppendBuf { - #[inline] - fn remaining(&self) -> usize { - (self.cap - self.wr) as usize - } - - #[inline] - fn has_remaining(&self) -> bool { - // Implemented as an equality for the perfz - self.cap != self.wr - } - - #[inline] - unsafe fn advance(&mut self, cnt: usize) { - self.wr += cnt as u32; - - if self.wr > self.cap { - panic!("buffer overflow"); - } - } - - #[inline] - unsafe fn mut_bytes(&mut self) -> &mut [u8] { - let wr = self.wr as usize; - let cap = self.cap as usize; - self.mem.mut_bytes_slice(wr, cap) - } -} - -impl AsRef<[u8]> for AppendBuf { - fn as_ref(&self) -> &[u8] { - self.bytes() - } -} - -impl From<AppendBuf> for Bytes { - fn from(src: AppendBuf) -> Bytes { - let rd = src.rd.get(); - let wr = src.wr; - - Bytes::from_boxed(src.mem.get_ref().clone(), rd as usize, (wr - rd) as usize) - } -} diff --git a/src/imp/buf/block.rs b/src/imp/buf/block.rs deleted file mode 100644 index b6b7049..0000000 --- a/src/imp/buf/block.rs +++ /dev/null @@ -1,367 +0,0 @@ -#![allow(warnings)] - -use {alloc, Buf, MutBuf, Bytes}; -use buf::AppendBuf; -use std::{cmp, ptr, slice}; -use std::io::Cursor; -use std::rc::Rc; -use std::collections::{vec_deque, VecDeque}; - -/// Append only buffer backed by a chain of `AppendBuf` buffers. -/// -/// Each `AppendBuf` block is of a fixed size and allocated on demand. This -/// makes the total capacity of a `BlockBuf` potentially much larger than what -/// is currently allocated. -pub struct BlockBuf { - len: usize, - cap: usize, - blocks: VecDeque<AppendBuf>, - new_block: NewBlock, -} - -enum NewBlock { - Heap(usize), - // Pool(Rc<Pool>), -} - -pub struct BlockBufCursor<'a> { - rem: usize, - blocks: vec_deque::Iter<'a, AppendBuf>, - curr: Option<Cursor<&'a [u8]>>, -} - -// TODO: -// -// - Add `comapct` fn which moves all buffered data into one block. 
-// - Add `slice` fn which returns `Bytes` for arbitrary views into the Buf -// -impl BlockBuf { - /// Create BlockBuf - pub fn new(max_blocks: usize, block_size: usize) -> BlockBuf { - assert!(max_blocks > 1, "at least 2 blocks required"); - - let new_block = NewBlock::Heap(block_size); - - BlockBuf { - len: 0, - cap: max_blocks * new_block.block_size(), - blocks: VecDeque::with_capacity(max_blocks), - new_block: new_block, - } - } - - /// Returns the number of buffered bytes - #[inline] - pub fn len(&self) -> usize { - debug_assert_eq!(self.len, self.blocks.iter().map(|b| b.len()).fold(0, |a, b| a+b)); - self.len - } - - /// Returns true if there are no buffered bytes - #[inline] - pub fn is_empty(&self) -> bool { - return self.len() == 0 - } - - /// Returns a `Buf` for the currently buffered bytes. - #[inline] - pub fn buf(&self) -> BlockBufCursor { - let mut iter = self.blocks.iter(); - - // Get the next leaf node buffer - let block = iter.next() - .map(|block| Cursor::new(block.bytes())); - - BlockBufCursor { - rem: self.len(), - blocks: iter, - curr: block, - } - } - - /// Consumes `n` buffered bytes, returning them as an immutable `Bytes` - /// value. - /// - /// # Panics - /// - /// Panics if `n` is greater than the number of buffered bytes. - #[inline] - pub fn shift(&mut self, n: usize) -> Bytes { - trace!("BlockBuf::shift; n={}", n); - - // Fast path - match self.blocks.len() { - 0 => { - assert!(n == 0, "buffer overflow"); - Bytes::empty() - } - 1 => { - let (ret, pop) = { - let block = self.blocks.front().expect("unexpected state"); - - let ret = block.shift(n); - self.len -= n; - - (ret, self.len == 0 && !MutBuf::has_remaining(block)) - }; - - if pop { - let _ = self.blocks.pop_front(); - } - - ret - } - _ => { - self.shift_multi(n) - } - } - } - - fn shift_multi(&mut self, mut n: usize) -> Bytes { - let mut ret: Option<Bytes> = None; - - while n > 0 { - if !self.have_buffered_data() { - panic!("shift len out of buffered range"); - } - - let (segment, pop) = { - let block = self.blocks.front().expect("unexpected state"); - - - let block_len = block.len(); - let segment_n = cmp::min(n, block_len); - n -= segment_n; - self.len -= segment_n; - - let pop = block_len == segment_n && !MutBuf::has_remaining(block); - - (block.shift(segment_n), pop) - }; - - if pop { - let _ = self.blocks.pop_front(); - } - - ret = Some(match ret.take() { - Some(curr) => { - curr.concat(segment) - } - None => segment, - }); - - } - - ret.unwrap_or_else(|| Bytes::empty()) - } - - /// Drop the first `n` buffered bytes - /// - /// # Panics - /// - /// Panics if `n` is greater than the number of buffered bytes. - pub fn drop(&mut self, mut n: usize) { - while n > 0 { - if !self.have_buffered_data() { - panic!("shift len out of buffered range"); - } - - let pop = { - let block = self.blocks.front().expect("unexpected state"); - - let segment_n = cmp::min(n, block.len()); - n -= segment_n; - self.len -= segment_n; - - block.drop(segment_n); - - block.len() == 0 - }; - - if pop { - let _ = self.blocks.pop_front(); - } - } - } - - pub fn is_compact(&mut self) -> bool { - self.blocks.len() <= 1 - } - - /// Moves all buffered bytes into a single block. - /// - /// # Panics - /// - /// Panics if the buffered bytes cannot fit in a single block. 
- pub fn compact(&mut self) { - trace!("BlockBuf::compact; attempting compaction"); - - if self.can_compact() { - trace!("BlockBuf::compact; data not aligned at start -- compacting"); - - let mut compacted = self.new_block.new_block() - .expect("unable to allocate block"); - - for block in self.blocks.drain(..) { - compacted.write_slice(block.bytes()); - } - - assert!(self.blocks.is_empty(), "blocks not removed"); - - self.blocks.push_back(compacted); - } - } - - #[inline] - fn can_compact(&self) -> bool { - if self.blocks.len() > 1 { - return true; - } - - self.blocks.front() - .map(|b| b.capacity() != self.new_block.block_size()) - .unwrap_or(false) - } - - /// Return byte slice if bytes are in sequential memory - #[inline] - pub fn bytes(&self) -> Option<&[u8]> { - match self.blocks.len() { - 0 => Some(unsafe { slice::from_raw_parts(ptr::null(), 0) }), - 1 => self.blocks.front().map(|b| b.bytes()), - _ => None, - } - } - - #[inline] - fn block_size(&self) -> usize { - self.new_block.block_size() - } - - #[inline] - fn allocate_block(&mut self) { - if let Some(block) = self.new_block.new_block() { - // Store the block - self.blocks.push_back(block); - } - } - - #[inline] - fn have_buffered_data(&self) -> bool { - self.len() > 0 - } - - #[inline] - fn needs_alloc(&self) -> bool { - if let Some(buf) = self.blocks.back() { - // `unallocated_blocks` is checked here because if further blocks - // cannot be allocated, an empty slice should be returned. - if MutBuf::has_remaining(buf) { - return false; - } - } - - true - } -} - -impl MutBuf for BlockBuf { - #[inline] - fn remaining(&self) -> usize { - // TODO: Ensure that the allocator has enough capacity to provide the - // remaining bytes - self.cap - self.len - } - - #[inline] - fn has_remaining(&self) -> bool { - // TODO: Ensure that the allocator has enough capacity to provide the - // remaining bytes - self.cap != self.len - } - - unsafe fn advance(&mut self, cnt: usize) { - trace!("BlockBuf::advance; cnt={:?}", cnt); - - // `mut_bytes` only returns bytes from the last block, thus it should - // only be possible to advance the last block - if let Some(buf) = self.blocks.back_mut() { - self.len += cnt; - buf.advance(cnt); - } - } - - #[inline] - unsafe fn mut_bytes(&mut self) -> &mut [u8] { - if self.needs_alloc() { - if self.blocks.len() != self.blocks.capacity() { - self.allocate_block() - } - } - - self.blocks.back_mut() - .map(|buf| buf.mut_bytes()) - .unwrap_or(slice::from_raw_parts_mut(ptr::null_mut(), 0)) - } -} - -impl Default for BlockBuf { - fn default() -> BlockBuf { - BlockBuf::new(16, 8_192) - } -} - -impl<'a> Buf for BlockBufCursor<'a> { - fn remaining(&self) -> usize { - self.rem - } - - fn bytes(&self) -> &[u8] { - self.curr.as_ref() - .map(|buf| Buf::bytes(buf)) - .unwrap_or(unsafe { slice::from_raw_parts(ptr::null(), 0)}) - } - - fn advance(&mut self, mut cnt: usize) { - cnt = cmp::min(cnt, self.rem); - - // Advance the internal cursor - self.rem -= cnt; - - // Advance the leaf buffer - while cnt > 0 { - { - let curr = self.curr.as_mut() - .expect("expected a value"); - - if curr.remaining() > cnt { - curr.advance(cnt); - break; - } - - cnt -= curr.remaining(); - } - - self.curr = self.blocks.next() - .map(|block| Cursor::new(block.bytes())); - } - } -} - -impl NewBlock { - #[inline] - fn block_size(&self) -> usize { - match *self { - NewBlock::Heap(size) => size, - // NewBlock::Pool(ref pool) => pool.buffer_len(), - } - } - - #[inline] - fn new_block(&self) -> Option<AppendBuf> { - match *self { - NewBlock::Heap(size) => 
Some(AppendBuf::with_capacity(size as u32)), - // NewBlock::Pool(ref pool) => pool.new_append_buf(), - } - } -} diff --git a/src/imp/buf/bound.rs b/src/imp/buf/bound.rs deleted file mode 100644 index 66cc2ca..0000000 --- a/src/imp/buf/bound.rs +++ /dev/null @@ -1,55 +0,0 @@ -use {Buf, IntoBuf}; -use std::mem; - -/// Takes a `T` that can be iterated as a buffer and provides buffer with a -/// 'static lifetime -pub struct BoundBuf<T> - where T: 'static, - &'static T: IntoBuf -{ - data: T, // This should never be mutated - buf: <&'static T as IntoBuf>::Buf, // This buf should never leak out -} - -impl<T> BoundBuf<T> - where &'static T: IntoBuf, -{ - /// Creates a new `BoundBuf` wrapping the provided data - pub fn new(data: T) -> BoundBuf<T> { - let buf = unsafe { - let r: &'static T = mem::transmute(&data); - r.into_buf() - }; - - BoundBuf { - data: data, - buf: buf, - } - } - - /// Consumes this BoundBuf, returning the underlying value. - pub fn into_inner(self) -> T { - self.data - } - - /// Gets a reference to the underlying value - pub fn get_ref(&self) -> &T { - &self.data - } -} - -impl<T> Buf for BoundBuf<T> - where &'static T: IntoBuf -{ - fn remaining(&self) -> usize { - self.buf.remaining() - } - - fn bytes(&self) -> &[u8] { - self.buf.bytes() - } - - fn advance(&mut self, cnt: usize) { - self.buf.advance(cnt) - } -} diff --git a/src/imp/buf/ring.rs b/src/imp/buf/ring.rs deleted file mode 100644 index 6e6630c..0000000 --- a/src/imp/buf/ring.rs +++ /dev/null @@ -1,158 +0,0 @@ -use {Buf, MutBuf}; -use imp::alloc; -use std::fmt; - - -/// `RingBuf` is backed by contiguous memory and writes may wrap. -/// -/// When writing reaches the end of the memory, writing resume at the beginning -/// of the memory. Writes may never overwrite pending reads. -pub struct RingBuf<T = Box<[u8]>> { - // Contiguous memory - mem: T, - // Current read position - rd: u64, - // Current write position - wr: u64, - // Mask used to convert the cursor to an offset - mask: u64, -} - -impl RingBuf { - /// Allocates a new `RingBuf` with the specified capacity. - pub fn with_capacity(capacity: usize) -> RingBuf { - let mem = unsafe { alloc::with_capacity(capacity) }; - RingBuf::new(mem) - } -} - -impl<T: AsRef<[u8]>> RingBuf<T> { - /// Creates a new `RingBuf` wrapping the provided slice - pub fn new(mem: T) -> RingBuf<T> { - // Ensure that the memory chunk provided has a length that is a power - // of 2 - let len = mem.as_ref().len() as u64; - let mask = len - 1; - - assert!(len & mask == 0, "mem length must be power of two"); - - RingBuf { - mem: mem, - rd: 0, - wr: 0, - mask: mask, - } - } - - /// Returns the number of bytes that the buf can hold. - pub fn capacity(&self) -> usize { - self.mem.as_ref().len() - } - - /// Return the read cursor position - pub fn position(&self) -> u64 { - self.rd - } - - /// Set the read cursor position - pub fn set_position(&mut self, position: u64) { - assert!(position <= self.wr && position + self.capacity() as u64 >= self.wr, - "position out of bounds"); - self.rd = position; - } - - /// Return the number of buffered bytes - pub fn len(&self) -> usize { - if self.wr >= self.capacity() as u64 { - (self.rd - (self.wr - self.capacity() as u64)) as usize - } else { - self.rd as usize - } - } - - /// Returns `true` if the buf cannot accept any further reads. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Resets all internal state to the initial state. - pub fn clear(&mut self) { - self.rd = 0; - self.wr = 0; - } - - /// Returns the number of bytes remaining to read. 
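// A minimal sketch (hypothetical helper, not part of this diff) of the cursor
// arithmetic `RingBuf` relies on above: the backing memory is required to have a
// power-of-two length, so `mask = len - 1` converts an ever-growing read or
// write cursor into a wrapped slice offset with a single bitwise AND.
fn wrapped_offset(cursor: u64, len: u64) -> usize {
    debug_assert!(len.is_power_of_two(), "RingBuf memory length must be a power of two");
    (cursor & (len - 1)) as usize
}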
- pub fn remaining_read(&self) -> usize { - (self.wr - self.rd) as usize - } - - /// Returns the remaining write capacity until which the buf becomes full. - pub fn remaining_write(&self) -> usize { - self.capacity() - self.remaining_read() - } -} - -impl<T: AsRef<[u8]>> fmt::Debug for RingBuf<T> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "RingBuf[.. {}]", self.len()) - } -} - -impl<T: AsRef<[u8]>> Buf for RingBuf<T> { - fn remaining(&self) -> usize { - self.remaining_read() - } - - fn bytes(&self) -> &[u8] { - // This comparison must be performed in order to differentiate between - // the at capacity case and the empty case. - if self.wr > self.rd { - let a = (self.rd & self.mask) as usize; - let b = (self.wr & self.mask) as usize; - - println!("a={:?}; b={:?}, wr={:?}; rd={:?}", a, b, self.wr, self.rd); - - if b > a { - &self.mem.as_ref()[a..b] - } else { - &self.mem.as_ref()[a..] - } - } else { - &[] - } - } - - fn advance(&mut self, cnt: usize) { - assert!(cnt <= self.remaining_read(), "buffer overflow"); - self.rd += cnt as u64 - } -} - -impl<T> MutBuf for RingBuf<T> - where T: AsRef<[u8]> + AsMut<[u8]>, -{ - fn remaining(&self) -> usize { - self.remaining_write() - } - - unsafe fn advance(&mut self, cnt: usize) { - assert!(cnt <= self.remaining_write(), "buffer overflow"); - self.wr += cnt as u64; - } - - unsafe fn mut_bytes(&mut self) -> &mut [u8] { - let a = (self.wr & self.mask) as usize; - - if self.wr > self.rd { - let b = (self.rd & self.mask) as usize; - - if a >= b { - &mut self.mem.as_mut()[a..] - } else { - &mut self.mem.as_mut()[a..b] - } - } else { - &mut self.mem.as_mut()[a..] - } - } -} diff --git a/src/imp/buf/take.rs b/src/imp/buf/take.rs deleted file mode 100644 index e9f760c..0000000 --- a/src/imp/buf/take.rs +++ /dev/null @@ -1,69 +0,0 @@ -use {Buf, MutBuf}; -use std::{cmp}; - -#[derive(Debug)] -pub struct Take<T> { - inner: T, - limit: usize, -} - -impl<T> Take<T> { - pub fn new(inner: T, limit: usize) -> Take<T> { - Take { - inner: inner, - limit: limit, - } - } - - pub fn into_inner(self) -> T { - self.inner - } - - pub fn get_ref(&self) -> &T { - &self.inner - } - - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - pub fn limit(&self) -> usize { - self.limit - } - - pub fn set_limit(&mut self, lim: usize) { - self.limit = lim - } -} - -impl<T: Buf> Buf for Take<T> { - fn remaining(&self) -> usize { - cmp::min(self.inner.remaining(), self.limit) - } - - fn bytes(&self) -> &[u8] { - &self.inner.bytes()[..self.limit] - } - - fn advance(&mut self, cnt: usize) { - let cnt = cmp::min(cnt, self.limit); - self.limit -= cnt; - self.inner.advance(cnt); - } -} - -impl<T: MutBuf> MutBuf for Take<T> { - fn remaining(&self) -> usize { - cmp::min(self.inner.remaining(), self.limit) - } - - unsafe fn mut_bytes(&mut self) -> &mut [u8] { - &mut self.inner.mut_bytes()[..self.limit] - } - - unsafe fn advance(&mut self, cnt: usize) { - let cnt = cmp::min(cnt, self.limit); - self.limit -= cnt; - self.inner.advance(cnt); - } -} diff --git a/src/imp/bytes/mod.rs b/src/imp/bytes/mod.rs deleted file mode 100644 index 05c9efc..0000000 --- a/src/imp/bytes/mod.rs +++ /dev/null @@ -1,289 +0,0 @@ -pub mod rope; -pub mod seq; -pub mod small; - -use {Buf, IntoBuf}; -use self::seq::Seq; -use self::small::Small; -use self::rope::{Rope, RopeBuf}; -use std::{cmp, fmt, ops}; -use std::io::Cursor; -use std::sync::Arc; - -/// An immutable sequence of bytes -#[derive(Clone)] -pub struct Bytes { - kind: Kind, -} - -#[derive(Clone)] -enum Kind { - Seq(Seq), 
- Small(Small), - Rope(Arc<Rope>), -} - -pub struct BytesBuf<'a> { - kind: BufKind<'a>, -} - -enum BufKind<'a> { - Cursor(Cursor<&'a [u8]>), - Rope(RopeBuf<'a>), -} - -impl Bytes { - /// Return an empty `Bytes` - pub fn empty() -> Bytes { - Bytes { kind: Kind::Small(Small::empty()) } - } - - pub fn from_slice<T: AsRef<[u8]>>(slice: T) -> Bytes { - Small::from_slice(slice.as_ref()) - .map(|b| Bytes { kind: Kind::Small(b)}) - .unwrap_or_else(|| Seq::from_slice(slice.as_ref())) - } - - pub fn from_vec(mem: Vec<u8>) -> Bytes { - let pos = 0; - let len = mem.len(); - - Small::from_slice(&mem[..]) - .map(|b| Bytes { kind: Kind::Small(b) }) - .unwrap_or_else(|| { - let seq = Seq::new(Arc::new(mem.into_boxed_slice()), pos, len); - Bytes { kind: Kind::Seq(seq) } - }) - } - - /// Creates a new `Bytes` from an `Arc<Box<[u8]>>`, an offset, and a length. - #[inline] - pub fn from_boxed(mem: Arc<Box<[u8]>>, pos: usize, len: usize) -> Bytes { - // Check ranges - assert!(pos + len <= mem.len(), "invalid arguments"); - - Small::from_slice(&mem[pos..pos + len]) - .map(|b| Bytes { kind: Kind::Small(b) }) - .unwrap_or_else(|| { - let seq = Seq::new(mem, pos, len); - Bytes { kind: Kind::Seq(seq) } - }) - } - - pub fn buf(&self) -> BytesBuf { - let kind = match self.kind { - Kind::Seq(ref v) => BufKind::Cursor(v.buf()), - Kind::Small(ref v) => BufKind::Cursor(v.buf()), - Kind::Rope(ref v) => BufKind::Rope(v.buf()), - }; - - BytesBuf { kind: kind } - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub fn len(&self) -> usize { - match self.kind { - Kind::Seq(ref v) => v.len(), - Kind::Small(ref v) => v.len(), - Kind::Rope(ref v) => v.len(), - } - } - - /// Concatenate two `Bytes` together - pub fn concat(self, other: Bytes) -> Bytes { - Rope::concat(self, other) - } - - /// Divide one `Bytes` into two at an index - pub fn split_at(self, mid: usize) -> (Bytes, Bytes) { - (self.slice_to(mid), self.slice_from(mid)) - } - - /// Returns a new ByteStr value containing the byte range between `begin` - /// (inclusive) and `end` (exclusive) - pub fn slice(&self, begin: usize, end: usize) -> Bytes { - match self.kind { - Kind::Seq(ref v) => v.slice(begin, end), - Kind::Small(ref v) => v.slice(begin, end), - Kind::Rope(ref v) => v.slice(begin, end), - } - } - - /// Returns a new ByteStr value containing the byte range starting from - /// `begin` (inclusive) to the end of the byte str. - /// - /// Equivalent to `bytes.slice(begin, bytes.len())` - pub fn slice_from(&self, begin: usize) -> Bytes { - self.slice(begin, self.len()) - } - - /// Returns a new ByteStr value containing the byte range from the start up - /// to `end` (exclusive). 
- /// - /// Equivalent to `bytes.slice(0, end)` - pub fn slice_to(&self, end: usize) -> Bytes { - self.slice(0, end) - } - - /// Returns the Rope depth - fn depth(&self) -> u16 { - match self.kind { - Kind::Rope(ref r) => r.depth(), - _ => 0, - } - } - - fn into_rope(self) -> Result<Arc<Rope>, Bytes> { - match self.kind { - Kind::Rope(r) => Ok(r), - _ => Err(self), - } - } -} - -impl<'a> From<&'a [u8]> for Bytes { - fn from(src: &'a [u8]) -> Bytes { - Bytes::from_slice(src) - } -} - -impl From<Vec<u8>> for Bytes { - fn from(src: Vec<u8>) -> Bytes { - let mem = Arc::new(src.into_boxed_slice()); - let len = mem.len(); - - Bytes::from_boxed(mem, 0, len) - } -} - -impl ops::Index<usize> for Bytes { - type Output = u8; - - fn index(&self, index: usize) -> &u8 { - match self.kind { - Kind::Seq(ref v) => v.index(index), - Kind::Small(ref v) => v.index(index), - Kind::Rope(ref v) => v.index(index), - } - } -} - -impl cmp::PartialEq<Bytes> for Bytes { - fn eq(&self, other: &Bytes) -> bool { - if self.len() != other.len() { - return false; - } - - let mut buf1 = self.buf(); - let mut buf2 = self.buf(); - - while buf1.has_remaining() { - let len; - - { - let b1 = buf1.bytes(); - let b2 = buf2.bytes(); - - len = cmp::min(b1.len(), b2.len()); - - if b1[..len] != b2[..len] { - return false; - } - } - - buf1.advance(len); - buf2.advance(len); - } - - true - } - - fn ne(&self, other: &Bytes) -> bool { - return !self.eq(other) - } -} - -impl<'a> IntoBuf for &'a Bytes { - type Buf = BytesBuf<'a>; - - fn into_buf(self) -> Self::Buf { - self.buf() - } -} - -/* - * - * ===== BytesBuf ===== - * - */ - -impl<'a> Buf for BytesBuf<'a> { - fn remaining(&self) -> usize { - match self.kind { - BufKind::Cursor(ref v) => v.remaining(), - BufKind::Rope(ref v) => v.remaining(), - } - } - - fn bytes(&self) -> &[u8] { - match self.kind { - BufKind::Cursor(ref v) => v.bytes(), - BufKind::Rope(ref v) => v.bytes(), - } - } - - fn advance(&mut self, cnt: usize) { - match self.kind { - BufKind::Cursor(ref mut v) => v.advance(cnt), - BufKind::Rope(ref mut v) => v.advance(cnt), - } - } -} - - -/* - * - * ===== Internal utilities ===== - * - */ - -impl fmt::Debug for Bytes { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut buf = self.buf(); - - try!(write!(fmt, "Bytes[len={}; ", self.len())); - - let mut rem = 128; - - while buf.has_remaining() { - let byte = buf.read_u8(); - - if rem > 0 { - if is_ascii(byte) { - try!(write!(fmt, "{}", byte as char)); - } else { - try!(write!(fmt, "\\x{:02X}", byte)); - } - - rem -= 1; - } else { - try!(write!(fmt, " ... ")); - break; - } - } - - try!(write!(fmt, "]")); - - Ok(()) - } -} - -fn is_ascii(byte: u8) -> bool { - match byte { - 10 | 13 | 32...126 => true, - _ => false, - } -} diff --git a/src/imp/bytes/rope.rs b/src/imp/bytes/rope.rs deleted file mode 100644 index 18bdb47..0000000 --- a/src/imp/bytes/rope.rs +++ /dev/null @@ -1,642 +0,0 @@ -use {Buf, MutBuf, Bytes}; -use super::seq::Seq; -use super::small::{Small}; -use buf::{Source, AppendBuf}; -use std::{cmp, ops}; -use std::io::Cursor; -use std::sync::Arc; - -// The implementation is mostly a port of the implementation found in the Java -// protobuf lib. - -const CONCAT_BY_COPY_LEN: usize = 128; -const MAX_DEPTH: usize = 47; - -// Used to decide when to rebalance the tree. 
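// A minimal sketch (hypothetical helper, not part of this diff) of the balance
// rule the table below encodes: a rope of depth `d` counts as balanced when its
// length is at least the `d`-th entry of the Fibonacci-derived minimums
// (1, 2, 3, 5, 8, ...), roughly mirroring `Partial::is_balanced` further down.
fn is_balanced_sketch(len: usize, depth: usize, min_length_by_depth: &[usize]) -> bool {
    min_length_by_depth.get(depth).map_or(false, |&min| len >= min)
}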
-static MIN_LENGTH_BY_DEPTH: [usize; MAX_DEPTH] = [ - 1, 2, 3, 5, 8, - 13, 21, 34, 55, 89, - 144, 233, 377, 610, 987, - 1_597, 2_584, 4_181, 6_765, 10_946, - 17_711, 28_657, 46_368, 75_025, 121_393, - 196_418, 317_811, 514_229, 832_040, 1_346_269, - 2_178_309, 3_524_578, 5_702_887, 9_227_465, 14_930_352, - 24_157_817, 39_088_169, 63_245_986, 102_334_155, 165_580_141, - 267_914_296, 433_494_437, 701_408_733, 1_134_903_170, 1_836_311_903, - 2_971_215_073, 4_294_967_295]; - -/// An immutable sequence of bytes formed by concatenation of other `ByteStr` -/// values, without copying the data in the pieces. The concatenation is -/// represented as a tree whose leaf nodes are each a `Bytes` value. -/// -/// Most of the operation here is inspired by the now-famous paper [Ropes: an -/// Alternative to Strings. hans-j. boehm, russ atkinson and michael -/// plass](http://www.cs.rit.edu/usr/local/pub/jeh/courses/QUARTERS/FP/Labs/CedarRope/rope-paper.pdf). -/// -/// Fundamentally the Rope algorithm represents the collection of pieces as a -/// binary tree. BAP95 uses a Fibonacci bound relating depth to a minimum -/// sequence length, sequences that are too short relative to their depth cause -/// a tree rebalance. More precisely, a tree of depth d is "balanced" in the -/// terminology of BAP95 if its length is at least F(d+2), where F(n) is the -/// n-the Fibonacci number. Thus for depths 0, 1, 2, 3, 4, 5,... we have -/// minimum lengths 1, 2, 3, 5, 8, 13,... -#[derive(Clone)] -pub struct Rope { - left: Node, - right: Node, - depth: u16, - len: usize, -} - -pub struct RopeBuf<'a> { - // Number of bytes left to iterate - rem: usize, - - // Iterates all the leaf nodes in order - nodes: NodeIter<'a>, - - // Current leaf node buffer - leaf_buf: Option<Cursor<&'a [u8]>>, -} - -#[derive(Clone)] -enum Node { - Empty, - Seq(Seq), - Small(Small), - Rope(Arc<Rope>), -} - -// TODO: store stack inline if possible -struct NodeIter<'a> { - stack: Vec<&'a Rope>, - next: Option<&'a Node>, -} - -/// Balance operation state -struct Balance { - stack: Vec<Partial>, -} - -/// Temporarily detached branch -enum Partial { - Bytes(Bytes), - Node(Node), -} - -impl Rope { - fn new<N1: Into<Node>, N2: Into<Node>>(left: N1, right: N2) -> Rope { - let left = left.into(); - let right = right.into(); - - debug_assert!(!left.is_empty() || right.is_empty()); - - // If left is 0 then right must be zero - let len = left.len() + right.len(); - let depth = cmp::max(left.depth(), right.depth()) + 1; - - Rope { - left: left, - right: right, - depth: depth, - len: len, - } - } - - pub fn buf(&self) -> RopeBuf { - let mut nodes = NodeIter::new(self); - - // Get the next leaf node buffer - let leaf_buf = nodes.next() - .map(|node| node.leaf_buf()); - - RopeBuf { - rem: self.len(), - nodes: nodes, - leaf_buf: leaf_buf, - } - } - - /// Concat two `Bytes` together. - pub fn concat(left: Bytes, right: Bytes) -> Bytes { - if right.is_empty() { - return left; - } - - if left.is_empty() { - return right; - } - - let len = left.len() + right.len(); - - if len < CONCAT_BY_COPY_LEN { - return concat_bytes(&left, &right, len); - } - - let left = match left.into_rope() { - Ok(left) => { - let len = left.right.len() + right.len(); - - if len < CONCAT_BY_COPY_LEN { - // Optimization from BAP95: As an optimization of the case - // where the ByteString is constructed by repeated concatenate, - // recognize the case where a short string is concatenated to a - // left-hand node whose right-hand branch is short. 
In the - // paper this applies to leaves, but we just look at the length - // here. This has the advantage of shedding references to - // unneeded data when substrings have been taken. - // - // When we recognize this case, we do a copy of the data and - // create a new parent node so that the depth of the result is - // the same as the given left tree. - let new_right = concat_bytes(&left.right, &right, len); - - return Rope::new(left.left.clone(), new_right).into_bytes(); - } - - if left.left.depth() > left.right.depth() && left.depth > right.depth() { - // Typically for concatenate-built strings the left-side is - // deeper than the right. This is our final attempt to - // concatenate without increasing the tree depth. We'll redo - // the the node on the RHS. This is yet another optimization - // for building the string by repeatedly concatenating on the - // right. - let new_right = Rope::new(left.right.clone(), right); - - return Rope::new(left.left.clone(), new_right).into_bytes(); - } - - Bytes { kind: super::Kind::Rope(left) } - } - Err(left) => left, - }; - - // Fine, we'll add a node and increase the tree depth -- unless we - // rebalance ;^) - let depth = cmp::max(left.depth(), right.depth()) + 1; - - if len >= MIN_LENGTH_BY_DEPTH[depth as usize] { - // No need to rebalance - return Rope::new(left, right).into_bytes(); - } - - Balance::new().balance(left, right).into() - } - - pub fn depth(&self) -> u16 { - self.depth - } - - pub fn len(&self) -> usize { - self.len as usize - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub fn slice(&self, begin: usize, end: usize) -> Bytes { - // Assert args - assert!(begin <= end && end <= self.len(), "invalid range"); - - let len = end - begin; - - // Empty slice - if len == 0 { - return Bytes::empty(); - } - - // Full rope - if len == self.len() { - return self.clone().into_bytes(); - } - - // == Proper substring == - - let left_len = self.left.len(); - - if end <= left_len { - // Slice on the left - return self.left.slice(begin, end); - } - - if begin >= left_len { - // Slice on the right - return self.right.slice(begin - left_len, end - left_len); - } - - // Split slice - let left_slice = self.left.slice(begin, self.left.len()); - let right_slice = self.right.slice(0, end - left_len); - - Rope::new(left_slice, right_slice).into_bytes() - } - - fn into_bytes(self) -> Bytes { - use super::Kind; - Bytes { kind: Kind::Rope(Arc::new(self)) } - } -} - -impl Node { - fn len(&self) -> usize { - match *self { - Node::Seq(ref b) => b.len(), - Node::Small(ref b) => b.len(), - Node::Rope(ref b) => b.len, - Node::Empty => 0, - } - } - - fn is_empty(&self) -> bool { - self.len() == 0 - } - - fn depth(&self) -> u16 { - match *self { - Node::Rope(ref r) => r.depth, - _ => 0, - } - } - - fn slice(&self, begin: usize, end: usize) -> Bytes { - match *self { - Node::Seq(ref v) => v.slice(begin, end), - Node::Small(ref v) => v.slice(begin, end), - Node::Rope(ref v) => v.slice(begin, end), - Node::Empty => unreachable!(), - } - } - - fn leaf_buf(&self) -> Cursor<&[u8]> { - match *self { - Node::Seq(ref v) => v.buf(), - Node::Small(ref v) => v.buf(), - _ => unreachable!(), - } - } - - fn as_rope(&self) -> Option<&Rope> { - match *self { - Node::Rope(ref v) => Some(&**v), - _ => None, - } - } -} - -impl<'a> Source for &'a Node { - fn source<B: MutBuf>(self, buf: &mut B) { - match *self { - Node::Seq(ref b) => b.as_slice().source(buf), - Node::Small(ref b) => b.as_ref().source(buf), - Node::Rope(ref b) => b.buf().source(buf), - Node::Empty => 
unreachable!(), - } - } -} - -impl From<Bytes> for Node { - fn from(src: Bytes) -> Node { - use super::Kind; - - match src.kind { - Kind::Seq(b) => Node::Seq(b), - Kind::Small(b) => Node::Small(b), - Kind::Rope(b) => Node::Rope(b), - } - } -} - -impl From<Rope> for Node { - fn from(src: Rope) -> Node { - Node::Rope(Arc::new(src)) - } -} - -impl ops::Index<usize> for Rope { - type Output = u8; - - fn index(&self, index: usize) -> &u8 { - assert!(index < self.len()); - - let left_len = self.left.len(); - - if index < left_len { - self.left.index(index) - } else { - self.right.index(index - left_len) - } - } -} - -impl ops::Index<usize> for Node { - type Output = u8; - - fn index(&self, index: usize) -> &u8 { - match *self { - Node::Seq(ref v) => v.index(index), - Node::Small(ref v) => v.index(index), - Node::Rope(ref v) => v.index(index), - Node::Empty => unreachable!(), - } - } -} - -/* - * - * ===== Helper Fns ===== - * - */ - -fn concat_bytes<S1, S2>(left: S1, right: S2, len: usize) -> Bytes - where S1: Source, S2: Source, -{ - let mut buf = AppendBuf::with_capacity(len as u32); - - buf.copy_from(left); - buf.copy_from(right); - - return buf.into(); -} - -fn depth_for_len(len: usize) -> u16 { - match MIN_LENGTH_BY_DEPTH.binary_search(&len) { - Ok(idx) => idx as u16, - Err(idx) => { - // It wasn't an exact match, so convert to the index of the - // containing fragment, which is one less even than the insertion - // point. - idx as u16 - 1 - } - } -} - -impl<'a> NodeIter<'a> { - fn new(root: &'a Rope) -> NodeIter<'a> { - let mut iter = NodeIter { - // TODO: Consider allocating with capacity for depth - stack: vec![], - next: None, - }; - - iter.next = iter.get_leaf_by_left(root); - iter - } - - fn get_leaf_by_left(&mut self, mut root: &'a Rope) -> Option<&'a Node> { - loop { - self.stack.push(root); - let left = &root.left; - - if left.is_empty() { - return None; - } - - if let Some(rope) = left.as_rope() { - root = rope; - continue; - } - - return Some(left); - } - } - - fn next_non_empty_leaf(&mut self) -> Option<&'a Node>{ - loop { - if let Some(rope) = self.stack.pop() { - if let Some(rope) = rope.right.as_rope() { - let res = self.get_leaf_by_left(&rope); - - if res.is_none() { - continue; - } - - return res; - } - - if rope.right.is_empty() { - continue; - } - - return Some(&rope.right); - } - - return None; - } - } -} - -impl<'a> Iterator for NodeIter<'a> { - type Item = &'a Node; - - fn next(&mut self) -> Option<&'a Node> { - let ret = self.next.take(); - - if ret.is_some() { - self.next = self.next_non_empty_leaf(); - } - - ret - } -} - -impl<'a> Buf for RopeBuf<'a> { - fn remaining(&self) -> usize { - self.rem - } - - fn bytes(&self) -> &[u8] { - self.leaf_buf.as_ref() - .map(|b| b.bytes()) - .unwrap_or(&[]) - } - - fn advance(&mut self, mut cnt: usize) { - cnt = cmp::min(cnt, self.rem); - - // Advance the internal cursor - self.rem -= cnt; - - // Advance the leaf buffer - while cnt > 0 { - { - let curr = self.leaf_buf.as_mut() - .expect("expected a value"); - - if curr.remaining() > cnt { - curr.advance(cnt); - break; - } - - cnt -= curr.remaining(); - } - - self.leaf_buf = self.nodes.next() - .map(|node| node.leaf_buf()); - } - } -} - -/* - * - * ===== Balance ===== - * - */ - -impl Balance { - fn new() -> Balance { - Balance { stack: vec![] } - } - - fn balance(&mut self, left: Bytes, right: Bytes) -> Bytes { - self.do_balance(Partial::Bytes(left)); - self.do_balance(Partial::Bytes(right)); - - let mut partial = self.stack.pop() - .expect("expected a value"); - - while 
!partial.is_empty() { - let new_left = self.stack.pop() - .expect("expected a value"); - - partial = Partial::Bytes(Rope::new(new_left, partial).into_bytes()); - } - - partial.unwrap_bytes() - } - - fn do_balance(&mut self, root: Partial) { - // BAP95: Insert balanced subtrees whole. This means the result might not - // be balanced, leading to repeated rebalancings on concatenate. However, - // these rebalancings are shallow due to ignoring balanced subtrees, and - // relatively few calls to insert() result. - if root.is_balanced() { - self.insert(root); - } else { - let rope = root.unwrap_rope(); - - self.do_balance(Partial::Node(rope.left)); - self.do_balance(Partial::Node(rope.right)); - } - } - - // Push a string on the balance stack (BAP95). BAP95 uses an array and - // calls the elements in the array 'bins'. We instead use a stack, so the - // 'bins' of lengths are represented by differences between the elements of - // minLengthByDepth. - // - // If the length bin for our string, and all shorter length bins, are - // empty, we just push it on the stack. Otherwise, we need to start - // concatenating, putting the given string in the "middle" and continuing - // until we land in an empty length bin that matches the length of our - // concatenation. - fn insert(&mut self, bytes: Partial) { - let depth_bin = depth_for_len(bytes.len()); - let bin_end = MIN_LENGTH_BY_DEPTH[depth_bin as usize + 1]; - - // BAP95: Concatenate all trees occupying bins representing the length - // of our new piece or of shorter pieces, to the extent that is - // possible. The goal is to clear the bin which our piece belongs in, - // but that may not be entirely possible if there aren't enough longer - // bins occupied. - if let Some(len) = self.peek().map(|r| r.len()) { - if len >= bin_end { - self.stack.push(bytes); - return; - } - } - - let bin_start = MIN_LENGTH_BY_DEPTH[depth_bin as usize]; - - // Concatenate the subtrees of shorter length - let mut new_tree = self.stack.pop() - .expect("expected a value"); - - while let Some(len) = self.peek().map(|r| r.len()) { - // If the head is big enough, break the loop - if len >= bin_start { break; } - - let left = self.stack.pop() - .expect("expected a value"); - - new_tree = Partial::Bytes(Rope::new(left, new_tree).into_bytes()); - } - - // Concatenate the given string - new_tree = Partial::Bytes(Rope::new(new_tree, bytes).into_bytes()); - - // Continue concatenating until we land in an empty bin - while let Some(len) = self.peek().map(|r| r.len()) { - let depth_bin = depth_for_len(new_tree.len()); - let bin_end = MIN_LENGTH_BY_DEPTH[depth_bin as usize + 1]; - - if len < bin_end { - let left = self.stack.pop() - .expect("expected a value"); - - new_tree = Partial::Bytes(Rope::new(left, new_tree).into_bytes()); - } else { - break; - } - } - - self.stack.push(new_tree); - } - - fn peek(&self) -> Option<&Partial> { - self.stack.last() - } -} - -impl Partial { - fn is_empty(&self) -> bool { - self.len() == 0 - } - - fn len(&self) -> usize { - match *self { - Partial::Bytes(ref v) => v.len(), - Partial::Node(ref v) => v.len(), - } - } - - fn depth(&self) -> u16 { - match *self { - Partial::Bytes(ref v) => v.depth(), - Partial::Node(ref v) => v.depth(), - } - } - - fn is_balanced(&self) -> bool { - self.len() >= MIN_LENGTH_BY_DEPTH[self.depth() as usize] - } - - fn unwrap_bytes(self) -> Bytes { - match self { - Partial::Bytes(v) => v, - _ => panic!("unexpected state calling `Partial::unwrap_bytes()`. 
Expected `Bytes`, got `Node`"), - } - } - - fn unwrap_rope(self) -> Rope { - let arc = match self { - Partial::Bytes(v) => v.into_rope().ok().expect("unexpected state calling `Partial::unwrap_rope()`"), - Partial::Node(Node::Rope(v)) => v, - _ => panic!("unexpected state calling `Partial::unwrap_rope()`"), - }; - - match Arc::try_unwrap(arc) { - Ok(v) => v, - Err(v) => (*v).clone(), - } - } -} - -impl From<Partial> for Node { - fn from(src: Partial) -> Node { - match src { - Partial::Node(v) => v, - Partial::Bytes(v) => Node::from(v), - } - } -} diff --git a/src/imp/bytes/seq.rs b/src/imp/bytes/seq.rs deleted file mode 100644 index 08204c8..0000000 --- a/src/imp/bytes/seq.rs +++ /dev/null @@ -1,78 +0,0 @@ -//! Immutable set of bytes sequential in memory. - -use {MutBuf, Bytes}; -use buf::{AppendBuf}; -use std::ops; -use std::io::Cursor; -use std::sync::Arc; - -pub struct Seq { - mem: Arc<Box<[u8]>>, - pos: usize, - len: usize, -} - -impl Seq { - /// Creates a new `SeqByteStr` from a `MemRef`, an offset, and a length. - /// - /// This function is unsafe as there are no guarantees that the given - /// arguments are valid. - pub fn new(mem: Arc<Box<[u8]>>, pos: usize, len: usize) -> Seq { - Seq { - mem: mem, - pos: pos, - len: len, - } - } - - pub fn from_slice(bytes: &[u8]) -> Bytes { - let mut buf = AppendBuf::with_capacity(bytes.len() as u32); - - buf.copy_from(bytes); - buf.into() - } - - pub fn len(&self) -> usize { - self.len as usize - } - - pub fn slice(&self, begin: usize, end: usize) -> Bytes { - use super::Kind; - - assert!(begin <= end && end <= self.len(), "invalid range"); - - let seq = Seq::new( - self.mem.clone(), - self.pos + begin, - end - begin); - - Bytes { kind: Kind::Seq(seq) } - } - - pub fn buf(&self) -> Cursor<&[u8]> { - Cursor::new(self.as_slice()) - } - - pub fn as_slice(&self) -> &[u8] { - &self.mem[self.pos..self.pos+self.len] - } -} - -impl ops::Index<usize> for Seq { - type Output = u8; - - fn index(&self, index: usize) -> &u8 { - assert!(index < self.len()); - self.mem.index(index + self.pos as usize) - } -} - -impl Clone for Seq { - fn clone(&self) -> Seq { - Seq { - mem: self.mem.clone(), - pos: self.pos, - len: self.len, - } - } -} diff --git a/src/imp/bytes/small.rs b/src/imp/bytes/small.rs deleted file mode 100644 index 3f010fb..0000000 --- a/src/imp/bytes/small.rs +++ /dev/null @@ -1,81 +0,0 @@ -use {Bytes}; -use std::ops; -use std::io::Cursor; - -/* - * - * ===== Small immutable set of bytes ===== - * - */ - -#[cfg(target_pointer_width = "64")] -const MAX_LEN: usize = 7; - -#[cfg(target_pointer_width = "32")] -const MAX_LEN: usize = 3; - -#[derive(Clone, Copy)] -pub struct Small { - len: u8, - bytes: [u8; MAX_LEN], -} - -impl Small { - pub fn empty() -> Small { - use std::mem; - - Small { - len: 0, - bytes: unsafe { mem::zeroed() } - } - } - - pub fn from_slice(bytes: &[u8]) -> Option<Small> { - use std::{mem, ptr}; - - if bytes.len() > MAX_LEN { - return None; - } - - let mut ret = Small { - len: bytes.len() as u8, - bytes: unsafe { mem::zeroed() }, - }; - - // Copy the memory - unsafe { - ptr::copy_nonoverlapping( - bytes.as_ptr(), - ret.bytes.as_mut_ptr(), - bytes.len()); - } - - Some(ret) - } - - pub fn buf(&self) -> Cursor<&[u8]> { - Cursor::new(self.as_ref()) - } - - pub fn slice(&self, begin: usize, end: usize) -> Bytes { - Bytes::from_slice(&self.as_ref()[begin..end]) - } - - pub fn len(&self) -> usize { - self.len as usize - }} - -impl AsRef<[u8]> for Small { - fn as_ref(&self) -> &[u8] { - &self.bytes[..self.len as usize] - } -} - -impl 
ops::Index<usize> for Small { - type Output = u8; - - fn index(&self, index: usize) -> &u8 { - assert!(index < self.len()); - &self.bytes[index] - } -} diff --git a/src/imp/mod.rs b/src/imp/mod.rs deleted file mode 100644 index 9e453f7..0000000 --- a/src/imp/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Used for internal code structure - -pub mod alloc; -pub mod buf; -pub mod bytes; diff --git a/src/lib.rs b/src/lib.rs index f8857fc..98d63fa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,36 +1,22 @@ -#![crate_name = "bytes"] -#![deny(warnings)] +//! Provides abstractions for working with bytes. -#[macro_use] -extern crate log; -extern crate byteorder; - -// Implementation in here -mod imp; -// TODO: delete -mod alloc; +#![deny(warnings, missing_docs)] -pub use imp::buf::{Buf, MutBuf, IntoBuf}; -pub use imp::bytes::Bytes; - -pub mod buf { - //! Traits, helpers, and type definitions for working with buffers. +extern crate byteorder; - pub use imp::buf::{ - Source, - Sink, - Reader, - ReadExt, - Writer, - WriteExt, - Fmt, - }; +mod buf; +mod bytes; - pub use imp::buf::slice::SliceBuf; - pub use imp::buf::append::AppendBuf; - pub use imp::buf::block::{BlockBuf, BlockBufCursor}; - pub use imp::buf::bound::{BoundBuf}; - pub use imp::buf::ring::RingBuf; - pub use imp::buf::take::Take; - pub use imp::bytes::BytesBuf; -} +pub use buf::{ + Buf, + BufMut, + IntoBuf, + Source, + Sink, + Reader, + Writer, +}; +pub use buf::byte::{ByteBuf}; +pub use buf::slice::{SliceBuf}; +pub use buf::take::{Take, TakeMut}; +pub use bytes::{Bytes, BytesMut}; diff --git a/test/test.rs b/test/test.rs deleted file mode 100644 index e05924c..0000000 --- a/test/test.rs +++ /dev/null @@ -1,27 +0,0 @@ -use rand::random; - -extern crate bytes; -extern crate rand; -extern crate byteorder; - -// == Buf -mod test_append; -mod test_block; -mod test_buf; -mod test_buf_fill; -mod test_slice_buf; -mod test_mut_buf; -mod test_ring; - -// == Bytes -mod test_bytes; -mod test_rope; -mod test_seq; -mod test_small; - -// == Pool -// mod test_pool; - -fn gen_bytes(n: usize) -> Vec<u8> { - (0..n).map(|_| random()).collect() -} diff --git a/test/test_append.rs b/test/test_append.rs deleted file mode 100644 index fd5fcef..0000000 --- a/test/test_append.rs +++ /dev/null @@ -1,30 +0,0 @@ -use bytes::{Buf, MutBuf}; -use bytes::buf::AppendBuf; - -#[test] -pub fn test_initial_buf_empty() { - // Run in a loop a bunch in hope that if there is a memory issue, it will - // be exposed - for _ in 0..1000 { - let mut buf = AppendBuf::with_capacity(100); - let mut dst: Vec<u8> = vec![]; - - assert_eq!(buf.remaining(), 128); - - buf.write_slice(b"hello world"); - assert_eq!(buf.remaining(), 117); - assert_eq!(buf.bytes(), b"hello world"); - - let view1 = buf.slice(0, 11); - view1.buf().copy_to(&mut dst); - - assert_eq!(dst, b"hello world"); - assert_eq!(view1, buf.slice(0, 11)); - - drop(buf); - let mut buf = AppendBuf::with_capacity(100); - buf.write_slice(b"zomg no no no no"); - - assert_eq!(dst, b"hello world"); - } -} diff --git a/test/test_block.rs b/test/test_block.rs deleted file mode 100644 index a13c903..0000000 --- a/test/test_block.rs +++ /dev/null @@ -1,22 +0,0 @@ -use bytes::{MutBuf}; -use bytes::buf::{BlockBuf}; - -#[test] -pub fn test_block_drop() { - let mut buf = BlockBuf::new(2, 4); - - assert_eq!(buf.remaining(), 8); - - buf.write_slice(b"12345"); - buf.write_slice(b"678"); - assert_eq!(buf.remaining(), 0); - assert_eq!(buf.len(), 8); - - buf.drop(1); - assert_eq!(buf.len(), 7); - assert_eq!(buf.is_compact(), false); - - buf.drop(4); - 
assert_eq!(buf.len(), 3); - assert_eq!(buf.is_compact(), true); -} diff --git a/test/test_buf_fill.rs b/test/test_buf_fill.rs deleted file mode 100644 index 6083b9f..0000000 --- a/test/test_buf_fill.rs +++ /dev/null @@ -1,48 +0,0 @@ -use bytes::*; -use bytes::buf::*; -use std::io; - -#[test] -pub fn test_readijng_buf_from_reader() { - let mut reader = chunks(vec![b"foo", b"bar", b"baz"]); - let mut buf = AppendBuf::with_capacity(1024); - - assert_eq!(3, reader.read_buf(&mut buf).unwrap()); - assert_eq!(Bytes::from_slice(&b"foo"), Bytes::from(buf)); -} - -fn chunks(chunks: Vec<&'static [u8]>) -> Chunked { - Chunked { chunks: chunks } -} - -struct Chunked { - chunks: Vec<&'static [u8]>, -} - -impl io::Read for Chunked { - fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> { - use std::{cmp, ptr}; - - if self.chunks.is_empty() { - return Ok(0); - } - - let src = self.chunks[0]; - let len = cmp::min(src.len(), dst.len()); - - unsafe { - ptr::copy_nonoverlapping( - src[..len].as_ptr(), - dst[..len].as_mut_ptr(), - len); - } - - if len < src.len() { - self.chunks[0] = &src[len..]; - } else { - self.chunks.remove(0); - } - - Ok(len) - } -} diff --git a/test/test_bytes.rs b/test/test_bytes.rs deleted file mode 100644 index 2d1681e..0000000 --- a/test/test_bytes.rs +++ /dev/null @@ -1,42 +0,0 @@ -use bytes::*; - -#[test] -pub fn test_debug_short_str_valid_ascii() { - let b = Bytes::from_slice(b"abcdefghij234"); - let d = format!("{:?}", b); - - assert_eq!(d, "Bytes[len=13; abcdefghij234]"); -} - -#[test] -pub fn test_debug_long_str_valid_ascii() { - let s = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. \ - Duis volutpat eros in gravida malesuada. Phasellus lobortis \ - maximus cursus. Praesent tristique orci non purus porta \ - dapibus. Ut ut commodo risus, sed semper felis. Phasellus \ - bibendum dui nunc, ac pharetra dui viverra a. Nunc imperdiet \ - sed nulla ut condimentum. In hac habitasse platea dictumst. \ - Interdum et malesuada fames ac ante ipsum primis in faucibus. \ - Sed facilisis dictum malesuada. Sed tempor odio ullamcorper mi \ - iaculis, eu tempus diam semper. Vivamus pulvinar metus ac erat \ - aliquet aliquam."; - - let b = Bytes::from(s.as_bytes()); - - let d = format!("{:?}", b); - - assert_eq!(d, "Bytes[len=556; Lorem ipsum dolor sit amet, \ - consectetur adipiscing elit. Duis volutpat \ - eros in gravida malesuada. Phasellus \ - lobortis maximus cur ... 
]"); -} - -#[test] -pub fn test_short_string_invalid_ascii() { - let b = Bytes::from_slice(b"foo\x00bar\xFFbaz"); - let d = format!("{:?}", b); - - println!("{:?}", b); - - assert_eq!(d, "Bytes[len=11; foo\\x00bar\\xFFbaz]"); -} diff --git a/test/test_pool.rs b/test/test_pool.rs deleted file mode 100644 index 1e3446b..0000000 --- a/test/test_pool.rs +++ /dev/null @@ -1,85 +0,0 @@ -use bytes::alloc::Pool; -use bytes::{Buf, MutBuf}; -use rand::{self, Rng}; -use byteorder::{ByteOrder, BigEndian}; - -#[test] -fn test_pool_of_zero_capacity() { - let pool = Pool::with_capacity(0, 0); - assert!(pool.new_byte_buf().is_none()); - - let pool = Pool::with_capacity(0, 1_024); - assert!(pool.new_byte_buf().is_none()); -} - -#[test] -fn test_pool_with_one_capacity() { - let pool = Pool::with_capacity(1, 1024); - - let mut buf = pool.new_byte_buf().unwrap(); - assert!(pool.new_byte_buf().is_none()); - - assert_eq!(1024, buf.remaining()); - - buf.write_slice(b"Hello World"); - let mut buf = buf.flip(); - - let mut dst = vec![]; - - buf.copy_to(&mut dst); - - assert_eq!(&dst[..], b"Hello World"); - - // return the buffer to the pool - drop(buf); - - let _ = pool.new_byte_buf().unwrap(); -} - -#[test] -fn test_pool_stress() { - let pool = Pool::with_capacity(100, 4); - let mut bufs = Vec::with_capacity(100); - let mut rng = rand::thread_rng(); - - let mut s = [0; 4]; - - for i in 0..50_000u32 { - let action: usize = rng.gen(); - - match action % 3 { - 0 if bufs.len() < 100 => { - let mut buf = pool.new_byte_buf().unwrap(); - BigEndian::write_u32(&mut s, i); - buf.write_slice(&s); - bufs.push((i, buf.flip())); - } - 1 if bufs.len() > 0 => { - // drop - let len = bufs.len(); - let _ = bufs.remove(rng.gen::<usize>() % len); - } - 2 if bufs.len() > 0 => { - // read - let len = bufs.len(); - let (i, mut buf) = bufs.remove(rng.gen::<usize>() % len); - buf.mark(); - buf.read_slice(&mut s); - buf.reset(); - let v = BigEndian::read_u32(&s); - assert_eq!(i, v); - bufs.push((i, buf)); - } - 3 if bufs.len() > 0 => { - // write data - let len = bufs.len(); - let (i, buf) = bufs.remove(rng.gen::<usize>() % len); - let mut buf = buf.flip(); - BigEndian::write_u32(&mut s, i); - buf.write_slice(&s); - bufs.push((i, buf.flip())); - } - _ => {} - } - } -} diff --git a/test/test_ring.rs b/test/test_ring.rs deleted file mode 100644 index d2dd5dd..0000000 --- a/test/test_ring.rs +++ /dev/null @@ -1,129 +0,0 @@ -use bytes::{Buf, MutBuf}; -use bytes::buf::RingBuf; - -#[test] -pub fn test_ring_buf_is_send() { - fn is_send<T: Send>() {} - is_send::<RingBuf>(); -} - -#[test] -pub fn test_initial_buf_empty() { - let mut buf = RingBuf::with_capacity(16); - assert_eq!(MutBuf::remaining(&buf), 16); - assert_eq!(Buf::remaining(&buf), 0); - - let bytes_written = buf.copy_from(&[1, 2, 3][..]); - assert_eq!(bytes_written, 3); - - let bytes_written = buf.copy_from(&[][..]); - assert_eq!(bytes_written, 0); - assert_eq!(MutBuf::remaining(&buf), 13); - assert_eq!(Buf::remaining(&buf), 3); - assert_eq!(buf.bytes(), [1, 2, 3]); - - let mut out = [0u8; 3]; - - let pos = buf.position(); - let bytes_read = buf.copy_to(&mut out[..]); - assert_eq!(bytes_read, 3); - assert_eq!(out, [1, 2, 3]); - buf.set_position(pos); - let bytes_read = buf.copy_to(&mut out[..]); - assert_eq!(bytes_read, 3); - assert_eq!(out, [1, 2, 3]); - - assert_eq!(MutBuf::remaining(&buf), 16); - assert_eq!(Buf::remaining(&buf), 0); -} - -#[test] -fn test_wrapping_write() { - let mut buf = RingBuf::with_capacity(16); - let mut out = [0;10]; - - buf.copy_from(&[42;12][..]); - let 
bytes_read = buf.copy_to(&mut out[..]); - assert_eq!(bytes_read, 10); - - let bytes_written = buf.copy_from(&[23;8][..]); - assert_eq!(bytes_written, 8); - - let pos = buf.position(); - let bytes_read = buf.copy_to(&mut out[..]); - assert_eq!(bytes_read, 10); - assert_eq!(out, [42, 42, 23, 23, 23, 23, 23, 23, 23, 23]); - buf.set_position(pos); - let bytes_read = buf.copy_to(&mut out[..]); - assert_eq!(bytes_read, 10); - assert_eq!(out, [42, 42, 23, 23, 23, 23, 23, 23, 23, 23]); -} - -#[test] -fn test_io_write_and_read() { - let mut buf = RingBuf::with_capacity(16); - let mut out = [0u8;8]; - - let written = buf.copy_from(&[1;8][..]); - assert_eq!(written, 8); - - buf.copy_to(&mut out[..]); - assert_eq!(out, [1;8]); - - let written = buf.copy_from(&[2;8][..]); - assert_eq!(written, 8); - - let bytes_read = buf.copy_to(&mut out[..]); - assert_eq!(bytes_read, 8); - assert_eq!(out, [2;8]); -} - -#[test] -#[should_panic] -fn test_wrap_reset() { - let mut buf = RingBuf::with_capacity(8); - buf.copy_from(&[1, 2, 3, 4, 5, 6, 7][..]); - let pos = buf.position(); - buf.copy_to(&mut [0; 4][..]); - buf.copy_from(&[1, 2, 3, 4][..]); - buf.set_position(pos); -} - -#[test] -// Test that writes across a mark/reset are preserved. -fn test_mark_write() { - let mut buf = RingBuf::with_capacity(8); - buf.copy_from(&[1, 2, 3, 4, 5, 6, 7][..]); - let pos = buf.position(); - buf.copy_from(&[8][..]); - buf.set_position(pos); - - let mut buf2 = [0; 8]; - buf.copy_to(&mut buf2[..]); - assert_eq!(buf2, [1, 2, 3, 4, 5, 6, 7, 8]); -} - -#[test] -// Test that "RingBuf::reset" does not reset the length of a -// full buffer to zero. -fn test_reset_full() { - let mut buf = RingBuf::with_capacity(8); - buf.copy_from(&[1, 2, 3, 4, 5, 6, 7, 8][..]); - assert_eq!(MutBuf::remaining(&buf), 0); - let pos = buf.position(); - buf.set_position(pos); - assert_eq!(MutBuf::remaining(&buf), 0); -} - - -#[test] -// Test that "RingBuf::clear" does the full reset -fn test_clear() { - let mut buf = RingBuf::with_capacity(8); - buf.copy_from(&[0; 8][..]); - assert_eq!(MutBuf::remaining(&buf), 0); - assert_eq!(Buf::remaining(&buf), 8); - buf.clear(); - assert_eq!(MutBuf::remaining(&buf), 8); - assert_eq!(Buf::remaining(&buf), 0); -} diff --git a/test/test_rope.rs b/test/test_rope.rs deleted file mode 100644 index 8054b8b..0000000 --- a/test/test_rope.rs +++ /dev/null @@ -1,83 +0,0 @@ -use bytes::{Buf, Bytes}; - -const TEST_BYTES_1: &'static [u8] = - b"dblm4ng7jp4v9rdn1w6hhssmluoqrrrqj59rccl9 - nkv2tm1t2da4jyku51ge7f8hv581gkki8lekmf5f - 1l44whp4aiwbvhkziw02292on4noyvuwjzsloqyc - 5n0iyn4l6o6tgjhlek00mynfzb1wgcwj4mqp6zdr - 3625yy7rj7xuisal7b1a7xgq271abvt5ssxuj39v - njtetokxxrgxzp7ik9adnypkmmcn4270yv9l46m7 - 9mu2zmqmkxdmgia210vkdytb7ywfcyt2bvcsg9eq - 5yqizxl6888zrksvaxhzs2v355jxu8gr21m33t83 - qvoian1ra7c6pvxabshgngldxa408p18l1fdet2h"; - -const TEST_BYTES_2: &'static [u8] = - b"jmh14t79mllzj1ohxfj6fun7idwbks8oh35f83g6 - ryaowe86mmou5t1xa91uyg8e95wcu5mje1mswien - tt4clgj029cw0pyuvfbvsgzdg1x7sr9qsjkf2b1t - h43smgp1ea22lph17f78cel0cc2kjoht5281xuy8 - 0ex9uaqwj4330jrp30stsk15j9bpqezu3w78ktit - ev5g6xsngr35q7pemdm9hihf0ebrw5fbwhm530lo - e0zyj1bm7yfyk7f2i45jhr3wu3bvb4hj8jve6db0 - iewmr9weecaon9vdnqo5hen9iaiox5vsaxuo461m - 8336ugp20u4sfky3kfawr0ome1tiqyx8chkerrjh - a95s0gypcsgo9jqxasqkoj08t4uq5moxmay5plg5 - tlh6f9omhn0ezvi0w2n8hx7n6qk7rn1s3mjpnpl6 - hvilp8awaa4tvsis66q4e5b3xwy2z1h2klpa87h7"; - -#[test] -pub fn test_rope_slice() { - let mut dst = vec![]; - - let bytes = Bytes::from(TEST_BYTES_1); - assert_eq!(TEST_BYTES_1.len(), bytes.len()); - - 
bytes.buf().copy_to(&mut dst); - assert_eq!(dst, TEST_BYTES_1); - - let left = bytes.slice_to(250); - assert_eq!(250, left.len()); - - dst.clear(); - left.buf().copy_to(&mut dst); - assert_eq!(dst, &TEST_BYTES_1[..250]); - - let right = bytes.slice_from(250); - assert_eq!(TEST_BYTES_1.len() - 250, right.len()); - - dst.clear(); - right.buf().copy_to(&mut dst); - // assert_eq!(dst, &TEST_BYTES_1[250..]); -} - -#[test] -pub fn test_rope_concat_two_byte_str() { - let mut dst = vec![]; - - let left = Bytes::from(TEST_BYTES_1); - let right = Bytes::from(TEST_BYTES_2); - - let both = left.concat(right); - - assert_eq!(both.len(), TEST_BYTES_1.len() + TEST_BYTES_2.len()); - - both.buf().copy_to(&mut dst); - let mut expected = Vec::new(); - expected.extend(TEST_BYTES_1.iter().cloned()); - expected.extend(TEST_BYTES_2.iter().cloned()); - assert_eq!(dst, expected); -} - -#[test] -pub fn test_rope_equality() { - let a = Bytes::from(&b"Mary had a little lamb, its fleece was white as snow; "[..]) - .concat(Bytes::from(&b"And everywhere that Mary went, the lamb was sure to go."[..])); - - let b = Bytes::from(&b"Mary had a little lamb, "[..]) - .concat(Bytes::from(&b"its fleece was white as snow; "[..])) - .concat( - Bytes::from(&b"And everywhere that Mary went, "[..]) - .concat(Bytes::from(&b"the lamb was sure to go."[..]))); - - assert_eq!(a, b); -} diff --git a/test/test_seq.rs b/test/test_seq.rs deleted file mode 100644 index 2fb9bde..0000000 --- a/test/test_seq.rs +++ /dev/null @@ -1,32 +0,0 @@ -use bytes::{Buf, Bytes}; -use super::gen_bytes; - -#[test] -pub fn test_slice_round_trip() { - let mut dst = vec![]; - let src = gen_bytes(2000); - - let s = Bytes::from(src.clone()); - assert_eq!(2000, s.len()); - - s.buf().copy_to(&mut dst); - assert_eq!(dst, src); -} - -#[test] -pub fn test_index() { - let src = gen_bytes(2000); - - let s = Bytes::from(src.clone()); - - for i in 0..2000 { - assert_eq!(src[i], s[i]); - } -} - -#[test] -#[should_panic] -pub fn test_index_out_of_range() { - let s = Bytes::from(gen_bytes(2000)); - let _ = s[2001]; -} diff --git a/test/test_slice_buf.rs b/test/test_slice_buf.rs deleted file mode 100644 index cb00c0e..0000000 --- a/test/test_slice_buf.rs +++ /dev/null @@ -1,67 +0,0 @@ -use bytes::{Buf, MutBuf}; -use bytes::buf::SliceBuf; - -#[test] -pub fn test_initial_buf_empty() { - let buf = SliceBuf::with_capacity(100); - - assert!(buf.capacity() == 128); - assert!(buf.remaining_write() == 128); - assert!(buf.remaining_read() == 0); -} - -#[test] -pub fn test_slice_buf_bytes() { - let mut buf = SliceBuf::with_capacity(32); - - buf.copy_from(&b"hello "[..]); - assert_eq!(&b"hello "[..], buf.bytes()); - - buf.copy_from(&b"world"[..]); - assert_eq!(&b"hello world"[..], buf.bytes()); -} - -#[test] -pub fn test_byte_buf_read_write() { - let mut buf = SliceBuf::with_capacity(32); - - buf.copy_from(&b"hello world"[..]); - assert_eq!(21, buf.remaining_write()); - - buf.copy_from(&b" goodbye"[..]); - assert_eq!(13, buf.remaining_write()); - - let mut dst = [0; 5]; - - let pos = buf.position(); - assert_eq!(5, buf.copy_to(&mut dst[..])); - assert_eq!(b"hello", &dst); - - buf.set_position(pos); - assert_eq!(5, buf.copy_to(&mut dst[..])); - assert_eq!(b"hello", &dst); - - assert_eq!(5, buf.copy_to(&mut dst[..])); - assert_eq!(b" worl", &dst); - - let mut dst = [0; 2]; - assert_eq!(2, buf.copy_to(&mut dst[..])); - assert_eq!(b"d ", &dst); - - let mut dst = [0; 7]; - assert_eq!(7, buf.copy_to(&mut dst[..])); - assert_eq!(b"goodbye", &dst); - - assert_eq!(13, buf.remaining_write()); - - 
buf.copy_from(&b" have fun"[..]); - assert_eq!(4, buf.remaining_write()); - - assert_eq!(buf.bytes(), b" have fun"); - - buf.set_position(0); - assert_eq!(buf.bytes(), b"hello world goodbye have fun"); - - buf.clear(); - assert_eq!(buf.bytes(), b""); -} diff --git a/test/test_small.rs b/test/test_small.rs deleted file mode 100644 index 48176f8..0000000 --- a/test/test_small.rs +++ /dev/null @@ -1,32 +0,0 @@ -use bytes::{Buf, Bytes}; -use super::gen_bytes; - -#[test] -pub fn test_slice_round_trip() { - let mut dst = vec![]; - let src = gen_bytes(3); - - let s = Bytes::from(src.clone()); - assert_eq!(3, s.len()); - - s.buf().copy_to(&mut dst); - assert_eq!(dst, src); -} - -#[test] -pub fn test_index() { - let src = gen_bytes(3); - - let s = Bytes::from(src.clone()); - - for i in 0..3 { - assert_eq!(src[i], s[i]); - } -} - -#[test] -#[should_panic] -pub fn test_index_out_of_range() { - let s = Bytes::from(gen_bytes(3)); - let _ = s[2001]; -} diff --git a/test/test_buf.rs b/tests/test_buf.rs similarity index 68% rename from test/test_buf.rs rename to tests/test_buf.rs index 07ba633..1e3d1f6 100644 --- a/test/test_buf.rs +++ b/tests/test_buf.rs @@ -1,10 +1,11 @@ -use bytes::{Buf}; -use byteorder; -use std::io::{Cursor}; -use std::vec::{Vec}; +extern crate bytes; +extern crate byteorder; + +use bytes::{Buf, Sink}; +use std::io::Cursor; #[test] -pub fn test_fresh_cursor_vec() { +fn test_fresh_cursor_vec() { let mut buf = Cursor::new(b"hello".to_vec()); assert_eq!(buf.remaining(), 5); @@ -27,29 +28,27 @@ pub fn test_fresh_cursor_vec() { } #[test] -pub fn test_read_u8() { +fn test_get_u8() { let mut buf = Cursor::new(b"\x21zomg"); - assert_eq!(0x21, buf.read_u8()); + assert_eq!(0x21, buf.get_u8()); } #[test] -fn test_read_u16() { +fn test_get_u16() { let buf = b"\x21\x54zomg"; - assert_eq!(0x2154, Cursor::new(buf).read_u16::<byteorder::BigEndian>()); - assert_eq!(0x5421, Cursor::new(buf).read_u16::<byteorder::LittleEndian>()); + assert_eq!(0x2154, Cursor::new(buf).get_u16::<byteorder::BigEndian>()); + assert_eq!(0x5421, Cursor::new(buf).get_u16::<byteorder::LittleEndian>()); } #[test] #[should_panic] -fn test_read_u16_buffer_underflow() { +fn test_get_u16_buffer_underflow() { let mut buf = Cursor::new(b"\x21"); - buf.read_u16::<byteorder::BigEndian>(); + buf.get_u16::<byteorder::BigEndian>(); } #[test] fn test_vec_sink_capacity() { - use bytes::buf::Sink; - let mut sink: Vec<u8> = Vec::new(); sink.reserve(16); assert!(sink.capacity() >= 16, "Capacity {} must be at least 16", sink.capacity()); diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs new file mode 100644 index 0000000..245a2bb --- /dev/null +++ b/tests/test_bytes.rs @@ -0,0 +1,174 @@ +extern crate bytes; + +use bytes::{Bytes, BytesMut}; + +fn is_sync<T: Sync>() {} +fn is_send<T: Send>() {} + +#[test] +fn test_bounds() { + is_sync::<Bytes>(); + is_send::<Bytes>(); + is_send::<BytesMut>(); +} + +#[test] +fn from_slice() { + let a = Bytes::from_slice(b"abcdefgh"); + assert_eq!(a, b"abcdefgh"[..]); + assert_eq!(a, &b"abcdefgh"[..]); + assert_eq!(a, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a); + assert_eq!(&b"abcdefgh"[..], a); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a); + + let a = BytesMut::from_slice(b"abcdefgh"); + assert_eq!(a, b"abcdefgh"[..]); + assert_eq!(a, &b"abcdefgh"[..]); + assert_eq!(a, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a); + assert_eq!(&b"abcdefgh"[..], a); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a); +} + +#[test] +fn fmt() { + let a = format!("{:?}", 
Bytes::from_slice(b"abcdefg")); + let b = format!("{:?}", b"abcdefg"); + + assert_eq!(a, b); + + let a = format!("{:?}", BytesMut::from_slice(b"abcdefg")); + assert_eq!(a, b); +} + +#[test] +fn len() { + let a = Bytes::from_slice(b"abcdefg"); + assert_eq!(a.len(), 7); + + let a = BytesMut::from_slice(b"abcdefg"); + assert_eq!(a.len(), 7); + + let a = Bytes::from_slice(b""); + assert!(a.is_empty()); + + let a = BytesMut::from_slice(b""); + assert!(a.is_empty()); +} + +#[test] +fn index() { + let a = Bytes::from_slice(b"hello world"); + assert_eq!(a[0..5], *b"hello"); +} + +#[test] +fn slice() { + let a = Bytes::from_slice(b"hello world"); + + let b = a.slice(3, 5); + assert_eq!(b, b"lo"[..]); + + let b = a.slice_to(5); + assert_eq!(b, b"hello"[..]); + + let b = a.slice_from(3); + assert_eq!(b, b"lo world"[..]); +} + +#[test] +#[should_panic] +fn slice_oob_1() { + let a = Bytes::from_slice(b"hello world"); + a.slice(5, 20); +} + +#[test] +#[should_panic] +fn slice_oob_2() { + let a = Bytes::from_slice(b"hello world"); + a.slice(15, 20); +} + +#[test] +fn split_off() { + let mut hello = Bytes::from_slice(b"helloworld"); + let world = hello.split_off(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); + + let mut hello = BytesMut::from_slice(b"helloworld"); + let world = hello.split_off(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); +} + +#[test] +#[should_panic] +fn split_off_oob() { + let mut hello = Bytes::from_slice(b"helloworld"); + hello.split_off(11); +} + +#[test] +#[should_panic] +fn split_off_oob_mut() { + let mut hello = BytesMut::from_slice(b"helloworld"); + hello.split_off(11); +} + +#[test] +fn split_off_uninitialized() { + let mut bytes = BytesMut::with_capacity(1024); + let other = bytes.split_off(128); + + assert_eq!(bytes.len(), 0); + assert_eq!(bytes.capacity(), 128); + + assert_eq!(other.len(), 0); + assert_eq!(other.capacity(), 896); +} + +#[test] +fn drain_to() { + let mut world = Bytes::from_slice(b"helloworld"); + let hello = world.drain_to(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); + + let mut world = BytesMut::from_slice(b"helloworld"); + let hello = world.drain_to(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); +} + +#[test] +#[should_panic] +fn drain_to_oob() { + let mut hello = Bytes::from_slice(b"helloworld"); + hello.drain_to(11); +} + +#[test] +#[should_panic] +fn drain_to_oob_mut() { + let mut hello = BytesMut::from_slice(b"helloworld"); + hello.drain_to(11); +} + +#[test] +fn drain_to_uninitialized() { + let mut bytes = BytesMut::with_capacity(1024); + let other = bytes.drain_to(128); + + assert_eq!(bytes.len(), 0); + assert_eq!(bytes.capacity(), 896); + + assert_eq!(other.len(), 0); + assert_eq!(other.capacity(), 128); +} diff --git a/test/test_mut_buf.rs b/tests/test_mut_buf.rs similarity index 57% rename from test/test_mut_buf.rs rename to tests/test_mut_buf.rs index bb00d1b..796f6a5 100644 --- a/test/test_mut_buf.rs +++ b/tests/test_mut_buf.rs @@ -1,22 +1,24 @@ -use bytes::MutBuf; -use byteorder; +extern crate bytes; +extern crate byteorder; + +use bytes::BufMut; use std::usize; #[test] -pub fn test_vec_as_mut_buf() { +fn test_vec_as_mut_buf() { let mut buf = Vec::with_capacity(64); - assert_eq!(buf.remaining(), usize::MAX); + assert_eq!(buf.remaining_mut(), usize::MAX); unsafe { - assert!(buf.mut_bytes().len() >= 64); + assert!(buf.bytes_mut().len() >= 64); } buf.copy_from(&b"zomg"[..]); assert_eq!(&buf, b"zomg"); - 
assert_eq!(buf.remaining(), usize::MAX - 4); + assert_eq!(buf.remaining_mut(), usize::MAX - 4); assert_eq!(buf.capacity(), 64); for _ in 0..16 { @@ -27,19 +29,19 @@ pub fn test_vec_as_mut_buf() { } #[test] -pub fn test_write_u8() { +fn test_put_u8() { let mut buf = Vec::with_capacity(8); - buf.write_u8(33); + buf.put_u8(33); assert_eq!(b"\x21", &buf[..]); } #[test] -fn test_write_u16() { +fn test_put_u16() { let mut buf = Vec::with_capacity(8); - buf.write_u16::<byteorder::BigEndian>(8532); + buf.put_u16::<byteorder::BigEndian>(8532); assert_eq!(b"\x21\x54", &buf[..]); buf.clear(); - buf.write_u16::<byteorder::LittleEndian>(8532); + buf.put_u16::<byteorder::LittleEndian>(8532); assert_eq!(b"\x54\x21", &buf[..]); } diff --git a/tests/test_slice_buf.rs b/tests/test_slice_buf.rs new file mode 100644 index 0000000..e712d14 --- /dev/null +++ b/tests/test_slice_buf.rs @@ -0,0 +1,71 @@ +extern crate bytes; + +use bytes::{Buf, BufMut, SliceBuf}; + +#[test] +fn test_initial_buf_empty() { + let mut mem = [0u8; 100]; + let buf = SliceBuf::new(&mut mem[..]); + + assert!(buf.capacity() == 100); + assert!(buf.remaining_mut() == 100); + assert!(buf.remaining() == 0); +} + +#[test] +fn test_slice_buf_bytes() { + let mut mem = [0u8; 32]; + let mut buf = SliceBuf::new(&mut mem[..]); + + buf.copy_from(&b"hello "[..]); + assert_eq!(&b"hello "[..], buf.bytes()); + + buf.copy_from(&b"world"[..]); + assert_eq!(&b"hello world"[..], buf.bytes()); +} + +#[test] +fn test_byte_buf_read_write() { + let mut mem = [0u8; 32]; + let mut buf = SliceBuf::new(&mut mem[..]); + + buf.copy_from(&b"hello world"[..]); + assert_eq!(21, buf.remaining_mut()); + + buf.copy_from(&b" goodbye"[..]); + assert_eq!(13, buf.remaining_mut()); + + let mut dst = [0; 5]; + + let pos = buf.position(); + buf.copy_to(&mut dst[..]); + assert_eq!(b"hello", &dst); + + buf.set_position(pos); + buf.copy_to(&mut dst[..]); + assert_eq!(b"hello", &dst); + + buf.copy_to(&mut dst[..]); + assert_eq!(b" worl", &dst); + + let mut dst = [0; 2]; + buf.copy_to(&mut dst[..]); + assert_eq!(b"d ", &dst); + + let mut dst = [0; 7]; + buf.copy_to(&mut dst[..]); + assert_eq!(b"goodbye", &dst); + + assert_eq!(13, buf.remaining_mut()); + + buf.copy_from(&b" have fun"[..]); + assert_eq!(4, buf.remaining_mut()); + + assert_eq!(buf.bytes(), b" have fun"); + + buf.set_position(0); + assert_eq!(buf.bytes(), b"hello world goodbye have fun"); + + buf.clear(); + assert_eq!(buf.bytes(), b""); +} -- GitLab
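
For readers skimming the patch, the new test files above double as the clearest documentation of the renamed accessors: `get_*` on any `Buf`, `put_*` on a `Vec<u8>` via `BufMut`, and `split_off` on `Bytes`. The stand-alone sketch below (not part of the patch itself) simply strings those same calls together in one program; it uses only names that appear in the added tests, and should be read as an assumption about the in-progress API rather than a finished example.

    extern crate bytes;
    extern crate byteorder;

    use bytes::{Buf, BufMut, Bytes};
    use std::io::Cursor;

    fn main() {
        // Reading: a `Cursor` over a byte slice implements `Buf`, so the
        // renamed `get_*` accessors consume data from the front.
        let mut rd = Cursor::new(b"\x21\x54zomg");
        assert_eq!(0x2154, rd.get_u16::<byteorder::BigEndian>());

        // Writing: `Vec<u8>` implements `BufMut`, so the renamed `put_*`
        // methods append encoded values to the vector.
        let mut wr: Vec<u8> = Vec::with_capacity(8);
        wr.put_u8(0x21);
        wr.put_u16::<byteorder::BigEndian>(0x5448);
        assert_eq!(b"\x21\x54\x48", &wr[..]);

        // `Bytes` is a contiguous, ref-counted slice; `split_off` divides it
        // into two independent handles without copying the data.
        let mut all = Bytes::from_slice(b"helloworld");
        let tail = all.split_off(5);
        assert_eq!(all, &b"hello"[..]);
        assert_eq!(tail, &b"world"[..]);
    }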