impl Extend<u8> for Bytes {
    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
        let iter = iter.into_iter();

        let (lower, upper) = iter.size_hint();

        // Avoid possible conversion into mut if there's nothing to add
if let Some(0) = upper {
return;
}
let mut bytes_mut = match mem::replace(self, Bytes::new()).try_mut() {
Ok(bytes_mut) => bytes_mut,
Err(bytes) => {
let mut bytes_mut = BytesMut::with_capacity(bytes.len() + lower);
bytes_mut.put_slice(&bytes);
bytes_mut
}
};
bytes_mut.extend(iter);
mem::replace(self, bytes_mut.freeze());
}
}
impl<'a> Extend<&'a u8> for Bytes {
fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
self.extend(iter.into_iter().map(|b| *b))
}
}
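#[cfg(test)]
mod bytes_extend_sketch {
    // A minimal sketch (hypothetical test, not part of the original source)
    // for the `Extend` impls above: a uniquely owned `Bytes` round-trips
    // through `BytesMut` without copying; a shared one is copied into a
    // fresh buffer first.
    use super::*;

    #[test]
    fn extend_appends_to_bytes() {
        let mut b = Bytes::from(&b"ab"[..]);
        b.extend(b"cd".iter());
        assert_eq!(&b[..], b"abcd");
    }
}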
/*
*
* ===== BytesMut =====
*
*/
impl BytesMut {
/// Creates a new `BytesMut` with the specified capacity.
///
/// The returned `BytesMut` will be able to hold at least `capacity` bytes
/// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
/// then `BytesMut` will not allocate.
///
/// It is important to note that this function does not specify the length
/// of the returned `BytesMut`, but only the capacity.
///
/// # Examples
///
/// ```
/// use bytes::{BytesMut, BufMut};
///
/// let mut bytes = BytesMut::with_capacity(64);
///
/// // `bytes` contains no data, even though there is capacity
/// assert_eq!(bytes.len(), 0);
///
    /// bytes.put(&b"hello world"[..]);
    ///
/// assert_eq!(&bytes[..], b"hello world");
/// ```
pub fn with_capacity(capacity: usize) -> BytesMut {
        BytesMut {
            inner: Inner::with_capacity(capacity),
        }
    }

    /// Creates a new `BytesMut` with default capacity.
///
/// Resulting object has length 0 and unspecified capacity.
/// This function does not allocate.
///
/// # Examples
///
/// ```
/// use bytes::{BytesMut, BufMut};
///
/// let mut bytes = BytesMut::new();
///
/// assert_eq!(0, bytes.len());
///
/// bytes.reserve(2);
/// bytes.put_slice(b"xy");
///
/// assert_eq!(&b"xy"[..], &bytes[..]);
/// ```
#[inline]
pub fn new() -> BytesMut {
BytesMut::with_capacity(0)
}
/// Returns the number of bytes contained in this `BytesMut`.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let b = BytesMut::from(&b"hello"[..]);
/// assert_eq!(b.len(), 5);
/// ```
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns true if the `BytesMut` has a length of 0.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let b = BytesMut::with_capacity(64);
/// assert!(b.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of bytes the `BytesMut` can hold without reallocating.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let b = BytesMut::with_capacity(64);
/// assert_eq!(b.capacity(), 64);
/// ```
    pub fn capacity(&self) -> usize {
        self.inner.capacity()
    }

    /// Converts `self` into an immutable `Bytes`.
///
/// The conversion is zero cost and is used to indicate that the slice
/// referenced by the handle will no longer be mutated. Once the conversion
/// is done, the handle can be cloned and shared across threads.
///
/// # Examples
///
/// ```
/// use bytes::{BytesMut, BufMut};
/// use std::thread;
///
/// let mut b = BytesMut::with_capacity(64);
    /// b.put("hello world");
    ///
    /// let b1 = b.freeze();
/// let b2 = b1.clone();
///
/// let th = thread::spawn(move || {
/// assert_eq!(&b1[..], b"hello world");
/// });
///
/// assert_eq!(&b2[..], b"hello world");
/// th.join().unwrap();
/// ```
    pub fn freeze(self) -> Bytes {
        Bytes { inner: self.inner }
    }

    /// Splits the bytes into two at the given index.
///
/// Afterwards `self` contains elements `[0, at)`, and the returned
/// `BytesMut` contains elements `[at, capacity)`.
///
/// This is an `O(1)` operation that just increases the reference count
/// and sets a few indices.
    ///
    /// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let mut a = BytesMut::from(&b"hello world"[..]);
/// let mut b = a.split_off(5);
///
/// a[0] = b'j';
/// b[0] = b'!';
///
/// assert_eq!(&a[..], b"jello");
/// assert_eq!(&b[..], b"!world");
/// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > capacity`.
pub fn split_off(&mut self, at: usize) -> BytesMut {
        BytesMut {
            inner: self.inner.split_off(at),
        }
    }

    /// Removes the bytes from the current view, returning them in a new
    /// `BytesMut` handle.
    ///
    /// Afterwards, `self` will be empty, but will retain any additional
    /// capacity that it had before the operation. This is identical to
    /// `self.split_to(self.len())`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(1024);
    /// buf.put(&b"hello world"[..]);
    ///
    /// let other = buf.take();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(1013, buf.capacity());
    ///
    /// assert_eq!(other, b"hello world"[..]);
    /// ```
    pub fn take(&mut self) -> BytesMut {
        let len = self.len();
        self.split_to(len)
    }
#[deprecated(since = "0.4.1", note = "use take instead")]
#[doc(hidden)]
pub fn drain(&mut self) -> BytesMut {
self.take()
    }

    /// Splits the buffer into two at the given index.
///
/// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
/// contains elements `[0, at)`.
///
/// This is an `O(1)` operation that just increases the reference count and
/// sets a few indices.
    ///
    /// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_to(5);
    ///
/// a[0] = b'!';
/// b[0] = b'j';
///
/// assert_eq!(&a[..], b"!world");
/// assert_eq!(&b[..], b"jello");
/// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
pub fn split_to(&mut self, at: usize) -> BytesMut {
        BytesMut {
            inner: self.inner.split_to(at),
        }
    }

    #[deprecated(since = "0.4.1", note = "use split_to instead")]
#[doc(hidden)]
pub fn drain_to(&mut self, at: usize) -> BytesMut {
self.split_to(at)
}
/// Shortens the buffer, keeping the first `len` bytes and dropping the
/// rest.
///
/// If `len` is greater than the buffer's current length, this has no
/// effect.
///
/// The [`split_off`] method can emulate `truncate`, but this causes the
/// excess bytes to be returned instead of dropped.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::from(&b"hello world"[..]);
/// buf.truncate(5);
/// assert_eq!(buf, b"hello"[..]);
/// ```
///
/// [`split_off`]: #method.split_off
pub fn truncate(&mut self, len: usize) {
self.inner.truncate(len);
    }

    /// Shortens the buffer, dropping the first `cnt` bytes and keeping the
/// rest.
///
/// This is the same function as `Buf::advance`, and in the next breaking
/// release of `bytes`, this implementation will be removed in favor of
/// having `BytesMut` implement `Buf`.
///
/// # Panics
///
/// This function panics if `cnt` is greater than `self.len()`
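    ///
    /// # Examples
    ///
    /// A short usage sketch (not from the original docs):
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.advance(6);
    /// assert_eq!(&buf[..], b"world");
    /// ```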
#[inline]
pub fn advance(&mut self, cnt: usize) {
assert!(cnt <= self.len(), "cannot advance past `remaining`");
unsafe { self.inner.set_start(cnt); }
}
/// Clears the buffer, removing all data.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::from(&b"hello world"[..]);
/// buf.clear();
/// assert!(buf.is_empty());
/// ```
pub fn clear(&mut self) {
self.truncate(0);
}
/// Resizes the buffer so that `len` is equal to `new_len`.
///
/// If `new_len` is greater than `len`, the buffer is extended by the
/// difference with each additional byte set to `value`. If `new_len` is
/// less than `len`, the buffer is simply truncated.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::new();
///
/// buf.resize(3, 0x1);
/// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
///
/// buf.resize(2, 0x2);
/// assert_eq!(&buf[..], &[0x1, 0x1]);
///
/// buf.resize(4, 0x3);
/// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
/// ```
pub fn resize(&mut self, new_len: usize, value: u8) {
self.inner.resize(new_len, value);
}
/// Sets the length of the buffer.
///
/// This will explicitly set the size of the buffer without actually
/// modifying the data, so it is up to the caller to ensure that the data
/// has been initialized.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let mut b = BytesMut::from(&b"hello world"[..]);
///
/// unsafe {
/// b.set_len(5);
/// }
///
/// assert_eq!(&b[..], b"hello");
///
/// unsafe {
/// b.set_len(11);
/// }
///
/// assert_eq!(&b[..], b"hello world");
/// ```
///
/// # Panics
///
/// This method will panic if `len` is out of bounds for the underlying
/// slice or if it comes after the `end` of the configured window.
pub unsafe fn set_len(&mut self, len: usize) {
        self.inner.set_len(len)
    }

    /// Reserves capacity for at least `additional` more bytes to be inserted
/// into the given `BytesMut`.
///
/// More than `additional` bytes may be reserved in order to avoid frequent
/// reallocations. A call to `reserve` may result in an allocation.
///
/// Before allocating new buffer space, the function will attempt to reclaim
/// space in the existing buffer. If the current handle references a small
/// view in the original buffer and all other handles have been dropped,
/// and the requested capacity is less than or equal to the existing
/// buffer's capacity, then the current view will be copied to the front of
/// the buffer and the handle will take ownership of the full buffer.
///
    /// In the following example, a new buffer is allocated.
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello"[..]);
    /// buf.reserve(64);
    /// assert!(buf.capacity() >= 69);
    /// ```
    ///
    /// In the following example, the existing buffer is reclaimed.
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(128);
    /// buf.put(&[0; 64][..]);
    ///
    /// let ptr = buf.as_ptr();
    /// let other = buf.take();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(buf.capacity(), 64);
    ///
    /// drop(other);
    /// buf.reserve(128);
    ///
    /// assert_eq!(buf.capacity(), 128);
    /// assert_eq!(buf.as_ptr(), ptr);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
pub fn reserve(&mut self, additional: usize) {
self.inner.reserve(additional)
    }

    /// Appends given bytes to this object.
///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first, so unlike `put_slice`, `extend_from_slice` will not panic.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::with_capacity(0);
/// buf.extend_from_slice(b"aaabbb");
/// buf.extend_from_slice(b"cccddd");
///
/// assert_eq!(b"aaabbbcccddd", &buf[..]);
/// ```
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.put_slice(extend);
}
    /// Combines split `BytesMut` objects back into a single contiguous buffer.
    ///
    /// If the `BytesMut` objects were not originally contiguous, the bytes
    /// from `other` are copied onto the end of `self`.
///
/// # Examples
///
/// ```
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::with_capacity(64);
/// buf.extend_from_slice(b"aaabbbcccddd");
///
/// let splitted = buf.split_off(6);
/// assert_eq!(b"aaabbb", &buf[..]);
/// assert_eq!(b"cccddd", &splitted[..]);
///
/// buf.unsplit(splitted);
/// assert_eq!(b"aaabbbcccddd", &buf[..]);
/// ```
pub fn unsplit(&mut self, other: BytesMut) {
let ptr;
if other.is_empty() {
return;
}
if self.is_empty() {
*self = other;
return;
}
unsafe {
ptr = self.inner.ptr.offset(self.inner.len as isize);
}
if ptr == other.inner.ptr &&
self.inner.kind() == KIND_ARC &&
other.inner.kind() == KIND_ARC
{
debug_assert_eq!(self.inner.arc.load(Acquire),
other.inner.arc.load(Acquire));
// Contiguous blocks, just combine directly
self.inner.len += other.inner.len;
self.inner.cap += other.inner.cap;
}
else {
            self.extend_from_slice(other.inner.as_ref());
        }
    }
}

impl BufMut for BytesMut {
#[inline]
fn remaining_mut(&self) -> usize {
self.capacity() - self.len()
}
#[inline]
unsafe fn advance_mut(&mut self, cnt: usize) {
let new_len = self.len() + cnt;
self.inner.set_len(new_len);
}
#[inline]
unsafe fn bytes_mut(&mut self) -> &mut [u8] {
let len = self.len();
// This will never panic as `len` can never become invalid
&mut self.inner.as_raw()[len..]
    }

    #[inline]
    fn put_slice(&mut self, src: &[u8]) {
        assert!(self.remaining_mut() >= src.len());
let len = src.len();
unsafe {
self.bytes_mut()[..len].copy_from_slice(src);
self.advance_mut(len);
}
}
#[inline]
fn put_u8(&mut self, n: u8) {
self.inner.put_u8(n);
}
#[inline]
fn put_i8(&mut self, n: i8) {
self.put_u8(n as u8);
}
}
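#[cfg(test)]
mod bufmut_impl_sketch {
    // A minimal sketch (hypothetical test, not part of the original source)
    // exercising the `BufMut` impl above: writes consume `remaining_mut`,
    // which is the gap between `capacity` and `len`.
    use super::*;

    #[test]
    fn writes_consume_remaining() {
        let mut buf = BytesMut::with_capacity(64);
        let before = buf.remaining_mut();
        buf.put_u8(b'x');
        assert_eq!(buf.remaining_mut(), before - 1);
        assert_eq!(&buf[..], b"x");
    }
}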
impl IntoBuf for BytesMut {
type Buf = Cursor<Self>;
fn into_buf(self) -> Self::Buf {
Cursor::new(self)
}
}
impl<'a> IntoBuf for &'a BytesMut {
type Buf = Cursor<&'a BytesMut>;
fn into_buf(self) -> Self::Buf {
Cursor::new(self)
}
}
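#[cfg(test)]
mod into_buf_sketch {
    // A minimal sketch (hypothetical test, not part of the original source)
    // for the `IntoBuf` impls above: the returned `Cursor` tracks the read
    // position as bytes are consumed from the front.
    use super::*;

    #[test]
    fn cursor_reads_from_front() {
        let buf = BytesMut::from(&b"ab"[..]);
        let mut cur = buf.into_buf();
        assert_eq!(cur.get_u8(), b'a');
        assert_eq!(cur.get_u8(), b'b');
        assert_eq!(cur.remaining(), 0);
    }
}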
impl AsRef<[u8]> for BytesMut {
fn as_ref(&self) -> &[u8] {
self.inner.as_ref()
}
}
impl ops::Deref for BytesMut {
type Target = [u8];
fn deref(&self) -> &[u8] {
self.as_ref()
}
}
impl AsMut<[u8]> for BytesMut {
fn as_mut(&mut self) -> &mut [u8] {
self.inner.as_mut()
}
}
impl ops::DerefMut for BytesMut {
    fn deref_mut(&mut self) -> &mut [u8] {
        self.inner.as_mut()
    }
}
impl From<Vec<u8>> for BytesMut {
fn from(src: Vec<u8>) -> BytesMut {
        BytesMut {
            inner: Inner::from_vec(src),
        }
    }
}

impl From<String> for BytesMut {
fn from(src: String) -> BytesMut {
BytesMut::from(src.into_bytes())
}
}
impl<'a> From<&'a [u8]> for BytesMut {
fn from(src: &'a [u8]) -> BytesMut {
let len = src.len();
if len == 0 {
BytesMut::new()
} else if len <= INLINE_CAP {
            unsafe {
                let mut inner: Inner = mem::uninitialized();
// Set inline mask
inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
inner.set_inline_len(len);
inner.as_raw()[0..len].copy_from_slice(src);
                BytesMut {
                    inner: inner,
                }
            }
        } else {
            BytesMut::from(src.to_vec())
        }
    }
}

impl<'a> From<&'a str> for BytesMut {
fn from(src: &'a str) -> BytesMut {
BytesMut::from(src.as_bytes())
}
}
impl From<Bytes> for BytesMut {
fn from(src: Bytes) -> BytesMut {
src.try_mut()
.unwrap_or_else(|src| BytesMut::from(&src[..]))
}
}
impl PartialEq for BytesMut {
fn eq(&self, other: &BytesMut) -> bool {
self.inner.as_ref() == other.inner.as_ref()
}
}
impl PartialOrd for BytesMut {
fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
self.inner.as_ref().partial_cmp(other.inner.as_ref())
}
}
impl Ord for BytesMut {
fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
self.inner.as_ref().cmp(other.inner.as_ref())
}
}
impl Default for BytesMut {
#[inline]
fn default() -> BytesMut {
        BytesMut::new()
    }
}

impl fmt::Debug for BytesMut {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
    }
}

impl hash::Hash for BytesMut {
fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
let s: &[u8] = self.as_ref();
s.hash(state);
}
}
impl Borrow<[u8]> for BytesMut {
fn borrow(&self) -> &[u8] {
self.as_ref()
}
}
impl BorrowMut<[u8]> for BytesMut {
fn borrow_mut(&mut self) -> &mut [u8] {
self.as_mut()
}
}
impl fmt::Write for BytesMut {
#[inline]
fn write_str(&mut self, s: &str) -> fmt::Result {
if self.remaining_mut() >= s.len() {
self.put_slice(s.as_bytes());
Ok(())
} else {
Err(fmt::Error)
}
    }

    #[inline]
fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result {
fmt::write(self, args)
}
}
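#[cfg(test)]
mod fmt_write_sketch {
    // A minimal sketch (hypothetical test, not part of the original source)
    // for the `fmt::Write` impl above: formatting succeeds as long as the
    // reserved capacity can absorb the output.
    use super::*;
    use std::fmt::Write;

    #[test]
    fn write_into_reserved_space() {
        let mut buf = BytesMut::with_capacity(64);
        write!(buf, "{} + {} = {}", 1, 2, 1 + 2).unwrap();
        assert_eq!(&buf[..], b"1 + 2 = 3");
    }
}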
impl Clone for BytesMut {
fn clone(&self) -> BytesMut {
BytesMut::from(&self[..])
}
}
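// Editorial note: unlike `Bytes::clone`, which is shallow and just bumps a
// reference count, this `Clone` impl copies the visible bytes into a fresh
// buffer, so the two handles never alias.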
impl IntoIterator for BytesMut {
type Item = u8;
type IntoIter = Iter<Cursor<BytesMut>>;
fn into_iter(self) -> Self::IntoIter {
self.into_buf().iter()
}
}
impl<'a> IntoIterator for &'a BytesMut {
type Item = u8;
type IntoIter = Iter<Cursor<&'a BytesMut>>;
fn into_iter(self) -> Self::IntoIter {
self.into_buf().iter()
}
}
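#[cfg(test)]
mod into_iter_sketch {
    // A minimal sketch (hypothetical test, not part of the original source)
    // for the `IntoIterator` impls above: both by-value and by-reference
    // iteration yield owned `u8` items.
    use super::*;

    #[test]
    fn iteration_yields_bytes() {
        let buf = BytesMut::from(&b"abc"[..]);
        let by_ref: Vec<u8> = (&buf).into_iter().collect();
        let by_val: Vec<u8> = buf.into_iter().collect();
        assert_eq!(by_ref, by_val);
        assert_eq!(by_ref, b"abc".to_vec());
    }
}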
impl Extend<u8> for BytesMut {
fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
let iter = iter.into_iter();
let (lower, _) = iter.size_hint();
self.reserve(lower);
for b in iter {
unsafe {
self.bytes_mut()[0] = b;
self.advance_mut(1);
}
}
}
}
impl<'a> Extend<&'a u8> for BytesMut {
fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
self.extend(iter.into_iter().map(|b| *b))
}
}
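#[cfg(test)]
mod extend_sketch {
    // A minimal sketch (hypothetical test, not part of the original source)
    // for the `Extend` impls above: the iterator's `size_hint` drives a
    // single up-front `reserve` before the bytes are appended.
    use super::*;

    #[test]
    fn extend_reserves_and_appends() {
        let mut buf = BytesMut::with_capacity(0);
        buf.extend(b"hello".iter());
        assert_eq!(&buf[..], b"hello");
        assert!(buf.capacity() >= 5);
    }
}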
/*
*
* ===== Inner =====
*
*/
impl Inner {
#[inline]
fn from_static(bytes: &'static [u8]) -> Inner {
let ptr = bytes.as_ptr() as *mut u8;
Inner {
// `arc` won't ever store a pointer. Instead, use it to
// track the fact that the `Bytes` handle is backed by a
// static buffer.
arc: AtomicPtr::new(KIND_STATIC as *mut Shared),
ptr: ptr,
len: bytes.len(),
cap: bytes.len(),
}
}
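    // Illustrative note: `KIND_STATIC` is a small integer tag stored where a
    // `Shared` pointer would otherwise live; `kind()` recovers it from the
    // low bits, so static buffers need no allocation or reference count.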
#[inline]
fn from_vec(mut src: Vec<u8>) -> Inner {
let len = src.len();
let cap = src.capacity();
let ptr = src.as_mut_ptr();
mem::forget(src);
let original_capacity_repr = original_capacity_to_repr(cap);
let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
        Inner {
            arc: AtomicPtr::new(arc as *mut Shared),
ptr: ptr,
len: len,
cap: cap,
}
}
#[inline]
fn with_capacity(capacity: usize) -> Inner {
if capacity <= INLINE_CAP {
unsafe {
// Using uninitialized memory is ~30% faster
let mut inner: Inner = mem::uninitialized();
inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
inner
}
} else {
Inner::from_vec(Vec::with_capacity(capacity))
}
}
    /// Return a slice for the handle's view into the shared buffer
    #[inline]
    fn as_ref(&self) -> &[u8] {
        unsafe {
            if self.is_inline() {
                slice::from_raw_parts(self.inline_ptr(), self.inline_len())
            } else {
                slice::from_raw_parts(self.ptr, self.len)
            }
        }
    }

    /// Return a mutable slice for the handle's view into the shared buffer
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        debug_assert!(!self.is_static());

        unsafe {
            if self.is_inline() {
                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
            } else {
                slice::from_raw_parts_mut(self.ptr, self.len)
            }
        }
    }

    /// Return a mutable slice for the handle's view into the shared buffer
    /// including potentially uninitialized bytes.
    #[inline]
    unsafe fn as_raw(&mut self) -> &mut [u8] {
        debug_assert!(!self.is_static());

        if self.is_inline() {
            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
        } else {
            slice::from_raw_parts_mut(self.ptr, self.cap)
        }
    }
/// Insert a byte into the next slot and advance the len by 1.
#[inline]
fn put_u8(&mut self, n: u8) {
if self.is_inline() {
let len = self.inline_len();
assert!(len < INLINE_CAP);
unsafe {
*self.inline_ptr().offset(len as isize) = n;
}
self.set_inline_len(len + 1);
} else {
assert!(self.len < self.cap);
unsafe {
*self.ptr.offset(self.len as isize) = n;
}
self.len += 1;
}
}
    /// The total number of bytes exposed by this handle
    #[inline]
    fn len(&self) -> usize {
        if self.is_inline() {
            self.inline_len()
        } else {
            self.len
        }
    }

    /// Pointer to the start of the inline buffer
    #[inline]
    unsafe fn inline_ptr(&self) -> *mut u8 {
        (self as *const Inner as *mut Inner as *mut u8)
            .offset(INLINE_DATA_OFFSET)
    }

    #[inline]
    fn inline_len(&self) -> usize {
        let p: &usize = unsafe { mem::transmute(&self.arc) };
        (p & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
    }

    /// Set the length of the inline buffer. This is done by writing to the
    /// least significant byte of the `arc` field.
    #[inline]
    fn set_inline_len(&mut self, len: usize) {
        debug_assert!(len <= INLINE_CAP);
        let p = self.arc.get_mut();
        *p = ((*p as usize & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)) as _;
    }

    /// Set the length of the handle's view into the shared buffer.
    #[inline]
    unsafe fn set_len(&mut self, len: usize) {
        if self.is_inline() {
            assert!(len <= INLINE_CAP);
            self.set_inline_len(len);
        } else {
            assert!(len <= self.cap);
            self.len = len;
        }
    }
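    // Illustrative note on the inline layout (assumes the usual constants,
    // INLINE_LEN_OFFSET = 2 and INLINE_LEN_MASK = 0b1111_1100): an inline
    // length of 5 is stored by `set_inline_len` as 5 << 2 = 0b0001_0100 in
    // the low byte of `arc`, leaving the two low kind bits untouched.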
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    fn capacity(&self) -> usize {
        if self.is_inline() { INLINE_CAP } else { self.cap }
    }

    fn split_off(&mut self, at: usize) -> Inner {
let mut other = unsafe { self.shallow_clone(true) };
unsafe {
other.set_start(at);
self.set_end(at);
}
return other
}
fn split_to(&mut self, at: usize) -> Inner {
let mut other = unsafe { self.shallow_clone(true) };
unsafe {
other.set_end(at);
self.set_start(at);
}
return other
}
fn truncate(&mut self, len: usize) {
if len <= self.len() {
unsafe { self.set_len(len); }
}
}
fn resize(&mut self, new_len: usize, value: u8) {
let len = self.len();
if new_len > len {
let additional = new_len - len;
self.reserve(additional);
unsafe {
let dst = self.as_raw()[len..].as_mut_ptr();
ptr::write_bytes(dst, value, additional);
self.set_len(new_len);
}
} else {
self.truncate(new_len);
}
}
unsafe fn set_start(&mut self, start: usize) {
// Setting the start to 0 is a no-op, so return early if this is the
// case.
if start == 0 {
return;
}
let kind = self.kind();
// Always check `inline` first, because if the handle is using inline
// data storage, all of the `Inner` struct fields will be gibberish.
        if kind == KIND_INLINE {
            assert!(start <= INLINE_CAP);
let len = self.inline_len();
            if len <= start {
                self.set_inline_len(0);
            } else {
// `set_start` is essentially shifting data off the front of the
// view. Inlined buffers only track the length of the slice.
// So, to update the start, the data at the new starting point
// is copied to the beginning of the buffer.
let new_len = len - start;