Skip to content
Snippets Groups Projects
bytes.rs 83.5 KiB
Newer Older
  • Learn to ignore specific revisions
  • use {IntoBuf, Buf, BufMut};
    use buf::Iter;
    
    use std::{cmp, fmt, mem, hash, ops, slice, ptr, usize};
    
    use std::borrow::{Borrow, BorrowMut};
    
    use std::io::Cursor;
    
    use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
    use std::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel};
    
    Jef's avatar
    Jef committed
    use std::iter::{FromIterator, Iterator};
    
    /// A reference counted contiguous slice of memory.
    ///
    
    /// `Bytes` is an efficient container for storing and operating on contiguous
    
    /// slices of memory. It is intended for use primarily in networking code, but
    /// could have applications elsewhere as well.
    ///
    /// `Bytes` values facilitate zero-copy network programming by allowing multiple
    /// `Bytes` objects to point to the same underlying memory. This is managed by
    /// using a reference count to track when the memory is no longer needed and can
    /// be freed.
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    
    /// let mut mem = Bytes::from(&b"Hello world"[..]);
    
    /// let a = mem.slice(0, 5);
    ///
    /// assert_eq!(&a[..], b"Hello");
    ///
    
    /// let b = mem.split_to(6);
    
    ///
    /// assert_eq!(&mem[..], b"world");
    /// assert_eq!(&b[..], b"Hello ");
    /// ```
    ///
    /// # Memory layout
    ///
    /// The `Bytes` struct itself is fairly small, limited to a pointer to the
    /// memory and 4 `usize` fields used to track information about which segment of
    /// the underlying memory the `Bytes` handle has access to.
    ///
    /// The memory layout looks like this:
    ///
    /// ```text
    /// +-------+
    /// | Bytes |
    /// +-------+
    ///  /      \_____
    /// |              \
    /// v               v
    /// +-----+------------------------------------+
    /// | Arc |         |      Data     |          |
    /// +-----+------------------------------------+
    /// ```
    ///
    /// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory
    /// slice and a pointer to the start of the region visible by the handle.
    /// `Bytes` also tracks the length of its view into the memory.
    ///
    /// # Sharing
    ///
    /// The memory itself is reference counted, and multiple `Bytes` objects may
    /// point to the same region. Each `Bytes` handle point to different sections within
    /// the memory region, and `Bytes` handle may or may not have overlapping views
    /// into the memory.
    ///
    ///
    /// ```text
    ///
    ///    Arc ptrs                   +---------+
    ///    ________________________ / | Bytes 2 |
    ///   /                           +---------+
    ///  /          +-----------+     |         |
    /// |_________/ |  Bytes 1  |     |         |
    /// |           +-----------+     |         |
    /// |           |           | ___/ data     | tail
    /// |      data |      tail |/              |
    /// v           v           v               v
    /// +-----+---------------------------------+-----+
    /// | Arc |     |           |               |     |
    /// +-----+---------------------------------+-----+
    /// ```
    ///
    /// # Mutating
    ///
    /// While `Bytes` handles may potentially represent overlapping views of the
    /// underlying memory slice and may not be mutated, `BytesMut` handles are
    /// guaranteed to be the only handle able to view that slice of memory. As such,
    /// `BytesMut` handles are able to mutate the underlying memory. Note that
    
    /// holding a unique view to a region of memory does not mean that there are no
    
    /// other `Bytes` and `BytesMut` handles with disjoint views of the underlying
    /// memory.
    ///
    
    /// # Inline bytes
    
    /// As an optimization, when the slice referenced by a `Bytes` or `BytesMut`
    
    /// handle is small enough [^1], `with_capacity` will avoid the allocation by
    /// inlining the slice directly in the handle. In this case, a clone is no
    /// longer "shallow" and the data will be copied.  Converting from a `Vec` will
    /// never use inlining.
    
    /// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems.
    
    ///
    pub struct Bytes {
    
        inner: Inner,
    
    /// A unique reference to a contiguous slice of memory.
    
    ///
    /// `BytesMut` represents a unique view into a potentially shared memory region.
    /// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
    
    Carl Lerche's avatar
    Carl Lerche committed
    /// mutate the memory. It is similar to a `Vec<u8>` but with less copies and
    /// allocations.
    
    ///
    /// For more detail, see [Bytes](struct.Bytes.html).
    ///
    
    Carl Lerche's avatar
    Carl Lerche committed
    /// # Growth
    ///
    /// One key difference from `Vec<u8>` is that most operations **do not
    /// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
    /// world");` could panic if `my_bytes` does not have enough capacity. Before
    /// writing to the buffer, ensure that there is enough remaining capacity by
    /// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
    /// is preferable.
    ///
    /// The only exception is `extend` which implicitly reserves required capacity.
    ///
    /// # Examples
    ///
    
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(64);
    
    Carl Lerche's avatar
    Carl Lerche committed
    /// buf.put(b'h');
    /// buf.put(b'e');
    /// buf.put("llo");
    
    ///
    /// assert_eq!(&buf[..], b"hello");
    ///
    /// // Freeze the buffer so that it can be shared
    /// let a = buf.freeze();
    ///
    /// // This does not allocate, instead `b` points to the same memory.
    /// let b = a.clone();
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    
    pub struct BytesMut {
    
        inner: Inner,
    
    Carl Lerche's avatar
    Carl Lerche committed
    }
    
    
    // Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated
    // to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions
    // that mutate the underlying buffer are only performed when the data range
    // being mutated is only available via a single `BytesMut` handle.
    //
    // # Data storage modes
    //
    // The goal of `bytes` is to be as efficient as possible across a wide range of
    // potential usage patterns. As such, `bytes` needs to be able to handle buffers
    // that are never shared, shared on a single thread, and shared across many
    // threads. `bytes` also needs to handle both tiny buffers as well as very large
    // buffers. For example, [Cassandra](http://cassandra.apache.org) values have
    // been known to be in the hundreds of megabyte, and HTTP header values can be a
    // few characters in size.
    //
    // To achieve high performance in these various situations, `Bytes` and
    // `BytesMut` use different strategies for storing the buffer depending on the
    // usage pattern.
    //
    // ## Delayed `Arc` allocation
    //
    // When a `Bytes` or `BytesMut` is first created, there is only one outstanding
    // handle referencing the buffer. Since sharing is not yet required, an `Arc`* is
    // not used and the buffer is backed by a `Vec<u8>` directly. Using an
    // `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being
    // shared, that allocation is avoided.
    //
    
    // When sharing does become necessary (`clone`, `split_to`, `split_off`), that
    
    // is when the buffer is promoted to being shareable. The `Vec<u8>` is moved
    // into an `Arc` and both the original handle and the new handle use the same
    // buffer via the `Arc`.
    //
    // * `Arc` is being used to signify an atomically reference counted cell. We
    // don't use the `Arc` implementation provided by `std` and instead use our own.
    // This ends up simplifying a number of the `unsafe` code snippets.
    //
    // ## Inlining small buffers
    //
    // The `Bytes` / `BytesMut` structs require 4 pointer sized fields. On 64 bit
    // systems, this ends up being 32 bytes, which is actually a lot of storage for
    // cases where `Bytes` is being used to represent small byte strings, such as
    // HTTP header names and values.
    //
    // To avoid any allocation at all in these cases, `Bytes` will use the struct
    // itself for storing the buffer, reserving 1 byte for meta data. This means
    // that, on 64 bit systems, 31 byte buffers require no allocation at all.
    //
    
    Stepan Koltsov's avatar
    Stepan Koltsov committed
    // The byte used for metadata stores a 2 bits flag used to indicate that the
    // buffer is stored inline as well as 6 bits for tracking the buffer length (the
    
    // return value of `Bytes::len`).
    //
    // ## Static buffers
    //
    // `Bytes` can also represent a static buffer, which is created with
    
    Stepan Koltsov's avatar
    Stepan Koltsov committed
    // `Bytes::from_static`. No copying or allocations are required for tracking
    
    // static buffers. The pointer to the `&'static [u8]`, the length, and a flag
    // tracking that the `Bytes` instance represents a static buffer is stored in
    // the `Bytes` struct.
    //
    // # Struct layout
    //
    // Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the
    // data fields as well as all of the function implementations.
    //
    // The `Inner` struct is carefully laid out in order to support the
    // functionality described above as well as being as small as possible. Size is
    // important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes
    // added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header
    // map structure.
    //
    // The `Inner` struct contains the following fields:
    //
    // * `ptr: *mut u8`
    // * `len: usize`
    // * `cap: usize`
    // * `arc: AtomicPtr<Shared>`
    //
    // ## `ptr: *mut u8`
    //
    // A pointer to start of the handle's buffer view. When backed by a `Vec<u8>`,
    // this is always the `Vec`'s pointer. When backed by an `Arc<Vec<u8>>`, `ptr`
    // may have been shifted to point somewhere inside the buffer.
    //
    // When in "inlined" mode, `ptr` is used as part of the inlined buffer.
    //
    // ## `len: usize`
    //
    // The length of the handle's buffer view. When backed by a `Vec<u8>`, this is
    // always the `Vec`'s length. The slice represented by `ptr` and `len` should
    // (ideally) always be initialized memory.
    //
    // When in "inlined" mode, `len` is used as part of the inlined buffer.
    //
    // ## `cap: usize`
    //
    // The capacity of the handle's buffer view. When backed by a `Vec<u8>`, this is
    // always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len`
    // may or may not be initialized memory.
    //
    // When in "inlined" mode, `cap` is used as part of the inlined buffer.
    //
    // ## `arc: AtomicPtr<Shared>`
    //
    // When `Inner` is in allocated mode (backed by Vec<u8> or Arc<Vec<u8>>), this
    // will be the pointer to the `Arc` structure tracking the ref count for the
    // underlying buffer. When the pointer is null, then the `Arc` has not been
    // allocated yet and `self` is the only outstanding handle for the underlying
    // buffer.
    //
    
    // The lower two bits of `arc` are used to track the storage mode of `Inner`.
    // `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11`
    // indicates vector storage, not yet promoted to Arc.  Since pointers to
    // allocated structures are aligned, the lower two bits of a pointer will always
    // be 0. This allows disambiguating between a pointer and the two flags.
    
    //
    // When in "inlined" mode, the least significant byte of `arc` is also used to
    // store the length of the buffer view (vs. the capacity, which is a constant).
    //
    // The rest of `arc`'s bytes are used as part of the inline buffer, which means
    // that those bytes need to be located next to the `ptr`, `len`, and `cap`
    // fields, which make up the rest of the inline buffer. This requires special
    // casing the layout of `Inner` depending on if the target platform is bit or
    // little endian.
    //
    // On little endian platforms, the `arc` field must be the first field in the
    // struct. On big endian platforms, the `arc` field must be the last field in
    // the struct. Since a deterministic struct layout is required, `Inner` is
    // annotated with `#[repr(C)]`.
    //
    // # Thread safety
    //
    // `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done
    // by bumping the buffer ref count and returning a new struct pointing to the
    // same buffer. However, the `Arc` structure is lazily allocated. This means
    // that if `Bytes` is stored itself in an `Arc` (`Arc<Bytes>`), the `clone`
    // function can be called concurrently from multiple threads. This is why an
    // `AtomicPtr` is used for the `arc` field vs. a `*const`.
    //
    // Care is taken to ensure that the need for synchronization is minimized. Most
    // operations do not require any synchronization.
    //
    #[cfg(target_endian = "little")]
    #[repr(C)]
    
    jq-rs's avatar
    jq-rs committed
        // WARNING: Do not access the fields directly unless you know what you are
        // doing. Instead, use the fns. See implementation comment above.
    
        arc: AtomicPtr<Shared>,
        ptr: *mut u8,
        len: usize,
        cap: usize,
    
    #[cfg(target_endian = "big")]
    
    jq-rs's avatar
    jq-rs committed
        // WARNING: Do not access the fields directly unless you know what you are
        // doing. Instead, use the fns. See implementation comment above.
    
    Carl Lerche's avatar
    Carl Lerche committed
        ptr: *mut u8,
    
        len: usize,
        cap: usize,
    
        arc: AtomicPtr<Shared>,
    
    // Thread-safe reference-counted container for the shared storage. This mostly
    // the same as `std::sync::Arc` but without the weak counter. The ref counting
    // fns are based on the ones found in `std`.
    //
    // The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
    // up making the overall code simpler and easier to reason about. This is due to
    // some of the logic around setting `Inner::arc` and other ways the `arc` field
    // is used. Using `Arc` ended up requiring a number of funky transmutes and
    // other shenanigans to make it work.
    struct Shared {
        vec: Vec<u8>,
    
        original_capacity_repr: usize,
    
        ref_count: AtomicUsize,
    }
    
    // Buffer storage strategy flags.
    
    const KIND_ARC: usize = 0b00;
    
    const KIND_INLINE: usize = 0b01;
    const KIND_STATIC: usize = 0b10;
    
    const KIND_VEC: usize = 0b11;
    const KIND_MASK: usize = 0b11;
    
    
    // The max original capacity value. Any `Bytes` allocated with a greater initial
    // capacity will default to this.
    const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
    // The original capacity algorithm will not take effect unless the originally
    // allocated capacity was at least 1kb in size.
    const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
    // The original capacity is stored in powers of 2 starting at 1kb to a max of
    // 64kb. Representing it as such requires only 3 bits of storage.
    const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
    const ORIGINAL_CAPACITY_OFFSET: usize = 2;
    
    // When the storage is in the `Vec` representation, the pointer can be advanced
    // at most this value. This is due to the amount of storage available to track
    // the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY
    // bits.
    const VEC_POS_OFFSET: usize = 5;
    const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
    const NOT_VEC_POS_MASK: usize = 0b11111;
    
    
    // Bit op constants for extracting the inline length value from the `arc` field.
    
    const INLINE_LEN_MASK: usize = 0b11111100;
    const INLINE_LEN_OFFSET: usize = 2;
    
    
    // Byte offset from the start of `Inner` to where the inline buffer data
    // starts. On little endian platforms, the first byte of the struct is the
    // storage flag, so the data is shifted by a byte. On big endian systems, the
    // data starts at the beginning of the struct.
    #[cfg(target_endian = "little")]
    const INLINE_DATA_OFFSET: isize = 1;
    #[cfg(target_endian = "big")]
    const INLINE_DATA_OFFSET: isize = 0;
    
    
    #[cfg(target_pointer_width = "64")]
    const PTR_WIDTH: usize = 64;
    #[cfg(target_pointer_width = "32")]
    const PTR_WIDTH: usize = 32;
    
    
    // Inline buffer capacity. This is the size of `Inner` minus 1 byte for the
    // metadata.
    
    #[cfg(target_pointer_width = "64")]
    
    const INLINE_CAP: usize = 4 * 8 - 1;
    
    #[cfg(target_pointer_width = "32")]
    
    const INLINE_CAP: usize = 4 * 4 - 1;
    
    /*
     *
     * ===== Bytes =====
     *
     */
    
    impl Bytes {
    
        /// Creates a new `Bytes` with the specified capacity.
    
        ///
        /// The returned `Bytes` will be able to hold at least `capacity` bytes
    
        /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
    
        /// then `BytesMut` will not allocate.
        ///
        /// It is important to note that this function does not specify the length
        /// of the returned `Bytes`, but only the capacity.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let mut bytes = Bytes::with_capacity(64);
        ///
        /// // `bytes` contains no data, even though there is capacity
        /// assert_eq!(bytes.len(), 0);
        ///
        /// bytes.extend_from_slice(&b"hello world"[..]);
        ///
        /// assert_eq!(&bytes[..], b"hello world");
        /// ```
        #[inline]
        pub fn with_capacity(capacity: usize) -> Bytes {
            Bytes {
    
                inner: Inner::with_capacity(capacity),
    
        /// Creates a new empty `Bytes`.
    
        ///
        /// This will not allocate and the returned `Bytes` handle will be empty.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let b = Bytes::new();
        /// assert_eq!(&b[..], b"");
        /// ```
    
    Carl Lerche's avatar
    Carl Lerche committed
        #[inline]
        pub fn new() -> Bytes {
    
            Bytes::with_capacity(0)
    
        /// Creates a new `Bytes` from a static slice.
        ///
    
        /// The returned `Bytes` will point directly to the static slice. There is
        /// no allocating or copying.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let b = Bytes::from_static(b"hello");
        /// assert_eq!(&b[..], b"hello");
        /// ```
    
        #[inline]
        pub fn from_static(bytes: &'static [u8]) -> Bytes {
            Bytes {
    
                inner: Inner::from_static(bytes),
    
        /// Returns the number of bytes contained in this `Bytes`.
    
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let b = Bytes::from(&b"hello"[..]);
        /// assert_eq!(b.len(), 5);
        /// ```
    
    pub fn len(&self) -> usize {
        // Delegates to `Inner`, which tracks the length of this handle's
        // view into the underlying buffer (see the `Inner` layout notes
        // earlier in this file).
        self.inner.len()
    }
    
    
        /// Returns true if the `Bytes` has a length of 0.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let b = Bytes::new();
        /// assert!(b.is_empty());
        /// ```
    
    pub fn is_empty(&self) -> bool {
        // Delegates to `Inner`; true when the handle's view has length 0.
        self.inner.is_empty()
    }
    
    
        /// Returns a slice of self for the index range `[begin..end)`.
        ///
        /// This will increment the reference count for the underlying memory and
        /// return a new `Bytes` handle set to the slice.
        ///
        /// This operation is `O(1)`.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let a = Bytes::from(&b"hello world"[..]);
        /// let b = a.slice(2, 5);
        ///
        /// assert_eq!(&b[..], b"llo");
        /// ```
        ///
        /// # Panics
        ///
        /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
        /// will panic.
        pub fn slice(&self, begin: usize, end: usize) -> Bytes {
    
            assert!(begin <= end);
            assert!(end <= self.len());
    
            if end - begin <= INLINE_CAP {
                return Bytes::from(&self[begin..end]);
    
            let mut ret = self.clone();
    
    Carl Lerche's avatar
    Carl Lerche committed
            unsafe {
                ret.inner.set_end(end);
    
                ret.inner.set_start(begin);
    
        /// Returns a slice of self for the index range `[begin..self.len())`.
        ///
        /// This will increment the reference count for the underlying memory and
        /// return a new `Bytes` handle set to the slice.
        ///
        /// This operation is `O(1)` and is equivalent to `self.slice(begin,
        /// self.len())`.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let a = Bytes::from(&b"hello world"[..]);
        /// let b = a.slice_from(6);
        ///
        /// assert_eq!(&b[..], b"world");
        /// ```
        ///
        /// # Panics
        ///
        /// Requires that `begin <= self.len()`, otherwise slicing will panic.
        pub fn slice_from(&self, begin: usize) -> Bytes {
            self.slice(begin, self.len())
    
        /// Returns a slice of self for the index range `[0..end)`.
        ///
        /// This will increment the reference count for the underlying memory and
        /// return a new `Bytes` handle set to the slice.
        ///
        /// This operation is `O(1)` and is equivalent to `self.slice(0, end)`.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let a = Bytes::from(&b"hello world"[..]);
        /// let b = a.slice_to(5);
        ///
        /// assert_eq!(&b[..], b"hello");
        /// ```
        ///
        /// # Panics
        ///
        /// Requires that `end <= self.len()`, otherwise slicing will panic.
    
    pub fn slice_to(&self, end: usize) -> Bytes {
        // Convenience wrapper: identical to `self.slice(0, end)`, so it
        // inherits that method's O(1) cost and its panic when
        // `end > self.len()`.
        self.slice(0, end)
    }
    
    
        /// Returns a slice of self that is equivalent to the given `subset`.
        ///
        /// When processing a `Bytes` buffer with other tools, one often gets a
        /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
        /// This function turns that `&[u8]` into another `Bytes`, as if one had
        /// called `self.slice()` with the offsets that correspond to `subset`.
        ///
        /// This operation is `O(1)`.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let bytes = Bytes::from(&b"012345678"[..]);
        /// let as_slice = bytes.as_ref();
        /// let subset = &as_slice[2..6];
        /// let subslice = bytes.slice_ref(&subset);
        /// assert_eq!(&subslice[..], b"2345");
        /// ```
        ///
        /// # Panics
        ///
        /// Requires that the given `sub` slice is in fact contained within the
        /// `Bytes` buffer; otherwise this function will panic.
        pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
            let bytes_p = self.as_ptr() as usize;
            let bytes_len = self.len();
    
            let sub_p = subset.as_ptr() as usize;
            let sub_len = subset.len();
    
            assert!(sub_p >= bytes_p);
            assert!(sub_p + sub_len <= bytes_p + bytes_len);
    
            let sub_offset = sub_p - bytes_p;
    
            self.slice(sub_offset, sub_offset + sub_len)
        }
    
    
        /// Splits the bytes into two at the given index.
        ///
        /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
        /// contains elements `[at, len)`.
        ///
    
        /// This is an `O(1)` operation that just increases the reference count and
        /// sets a few indices.
    
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
    
        /// let mut a = Bytes::from(&b"hello world"[..]);
    
        /// let b = a.split_off(5);
        ///
        /// assert_eq!(&a[..], b"hello");
        /// assert_eq!(&b[..], b" world");
        /// ```
        ///
    
        /// # Panics
        ///
    
        /// Panics if `at > len`.
    
        pub fn split_off(&mut self, at: usize) -> Bytes {
    
            if at == self.len() {
                return Bytes::new();
            }
    
            if at == 0 {
                return mem::replace(self, Bytes::new());
            }
    
    
                inner: self.inner.split_off(at),
    
        /// Splits the bytes into two at the given index.
    
        ///
        /// Afterwards `self` contains elements `[at, len)`, and the returned
        /// `Bytes` contains elements `[0, at)`.
        ///
    
        /// This is an `O(1)` operation that just increases the reference count and
        /// sets a few indices.
    
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
    
        /// let mut a = Bytes::from(&b"hello world"[..]);
    
        /// let b = a.split_to(5);
    
        ///
        /// assert_eq!(&a[..], b" world");
        /// assert_eq!(&b[..], b"hello");
        /// ```
        ///
    
        /// # Panics
        ///
    
        /// Panics if `at > len`.
    
        pub fn split_to(&mut self, at: usize) -> Bytes {
    
            if at == self.len() {
                return mem::replace(self, Bytes::new());
            }
    
            if at == 0 {
                return Bytes::new();
            }
    
    
                inner: self.inner.split_to(at),
    
    // Backward-compatibility shim kept for callers of the pre-0.4.1 name;
    // hidden from rustdoc and routed straight to `split_to`.
    #[deprecated(since = "0.4.1", note = "use split_to instead")]
    #[doc(hidden)]
    pub fn drain_to(&mut self, at: usize) -> Bytes {
        self.split_to(at)
    }
    
    
        /// Shortens the buffer, keeping the first `len` bytes and dropping the
        /// rest.
        ///
        /// If `len` is greater than the buffer's current length, this has no
        /// effect.
        ///
        /// The [`split_off`] method can emulate `truncate`, but this causes the
        /// excess bytes to be returned instead of dropped.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let mut buf = Bytes::from(&b"hello world"[..]);
        /// buf.truncate(5);
        /// assert_eq!(buf, b"hello"[..]);
        /// ```
        ///
        /// [`split_off`]: #method.split_off
    pub fn truncate(&mut self, len: usize) {
        // Delegates to `Inner::truncate`; per the doc comment above, a `len`
        // larger than the current length leaves the buffer unchanged.
        self.inner.truncate(len);
    }
    
    
        /// Shortens the buffer, dropping the first `cnt` bytes and keeping the
        /// rest.
        ///
        /// This is the same function as `Buf::advance`, and in the next breaking
        /// release of `bytes`, this implementation will be removed in favor of
        /// having `Bytes` implement `Buf`.
        ///
        /// # Panics
        ///
        /// This function panics if `cnt` is greater than `self.len()`
    #[inline]
    pub fn advance(&mut self, cnt: usize) {
        assert!(cnt <= self.len(), "cannot advance past `remaining`");
        // The assert above keeps `cnt` within the current view, so moving
        // the view's start forward by `cnt` stays in bounds.
        // NOTE(review): soundness relies on `Inner::set_start` accepting any
        // in-bounds offset — confirm against its implementation.
        unsafe { self.inner.set_start(cnt); }
    }
    
    
        /// Clears the buffer, removing all data.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let mut buf = Bytes::from(&b"hello world"[..]);
        /// buf.clear();
        /// assert!(buf.is_empty());
        /// ```
    pub fn clear(&mut self) {
        // Truncating to zero empties the view without touching the
        // underlying allocation.
        self.truncate(0);
    }
    
    
        /// Attempts to convert into a `BytesMut` handle.
    
        ///
        /// This will only succeed if there are no other outstanding references to
    
        /// the underlying chunk of memory. `Bytes` handles that contain inlined
        /// bytes will always be convertable to `BytesMut`.
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let a = Bytes::from(&b"Mary had a little lamb, little lamb, little lamb..."[..]);
        ///
        /// // Create a shallow clone
        /// let b = a.clone();
        ///
        /// // This will fail because `b` shares a reference with `a`
        /// let a = a.try_mut().unwrap_err();
        ///
        /// drop(b);
        ///
        /// // This will succeed
        /// let mut a = a.try_mut().unwrap();
        ///
        /// a[0] = b'b';
        ///
        /// assert_eq!(&a[..4], b"bary");
        /// ```
    
        pub fn try_mut(mut self) -> Result<BytesMut, Bytes> {
    
    Carl Lerche's avatar
    Carl Lerche committed
            if self.inner.is_mut_safe() {
    
    Carl Lerche's avatar
    Carl Lerche committed
                Ok(BytesMut { inner: self.inner })
    
        /// Appends given bytes to this object.
    
        ///
        /// If this `Bytes` object has not enough capacity, it is resized first.
    
        /// If it is shared (`refcount > 1`), it is copied first.
    
        /// This operation can be less effective than the similar operation on
        /// `BytesMut`, especially on small additions.
    
        ///
        /// # Examples
        ///
        /// ```
        /// use bytes::Bytes;
        ///
        /// let mut buf = Bytes::from("aabb");
        /// buf.extend_from_slice(b"ccdd");
        /// buf.extend_from_slice(b"eeff");
        ///
        /// assert_eq!(b"aabbccddeeff", &buf[..]);
        /// ```
        pub fn extend_from_slice(&mut self, extend: &[u8]) {
            if extend.is_empty() {
                return;
            }
    
            let new_cap = self.len().checked_add(extend.len()).expect("capacity overflow");
    
            let result = match mem::replace(self, Bytes::new()).try_mut() {
                Ok(mut bytes_mut) => {
                    bytes_mut.extend_from_slice(extend);
                    bytes_mut
                },
                Err(bytes) => {
                    let mut bytes_mut = BytesMut::with_capacity(new_cap);
                    bytes_mut.put_slice(&bytes);
                    bytes_mut.put_slice(extend);
                    bytes_mut
                }
            };
    
            mem::replace(self, result.freeze());
        }
    
impl IntoBuf for Bytes {
    type Buf = Cursor<Self>;

    fn into_buf(self) -> Self::Buf {
        // `Cursor` supplies the read position needed for the `Buf`
        // implementation; the `Bytes` is consumed and moved inside.
        Cursor::new(self)
    }
}
    
    impl<'a> IntoBuf for &'a Bytes {
    
        type Buf = Cursor<Self>;
    
    
        fn into_buf(self) -> Self::Buf {
    
            Cursor::new(self)
    
    impl Clone for Bytes {
        fn clone(&self) -> Bytes {
    
                inner: unsafe { self.inner.shallow_clone(false) },
    
        }
    }
    
impl AsRef<[u8]> for Bytes {
    fn as_ref(&self) -> &[u8] {
        // Borrow the handle's current view of the buffer as a plain slice.
        self.inner.as_ref()
    }
}
    
    impl ops::Deref for Bytes {
        type Target = [u8];
    
    
        fn deref(&self) -> &[u8] {
    
    Carl Lerche's avatar
    Carl Lerche committed
            self.inner.as_ref()
    
    Carl Lerche's avatar
    Carl Lerche committed
impl From<BytesMut> for Bytes {
    fn from(src: BytesMut) -> Bytes {
        // Freezing relinquishes the uniqueness guarantee, producing an
        // immutable (potentially shareable) handle to the same memory.
        src.freeze()
    }
}
    
    
impl From<Vec<u8>> for Bytes {
    fn from(src: Vec<u8>) -> Bytes {
        // Route through `BytesMut` so the `Vec`'s allocation is reused;
        // per the type-level docs, converting from a `Vec` never inlines.
        BytesMut::from(src).freeze()
    }
}
    
    
    Carl Lerche's avatar
    Carl Lerche committed
impl From<String> for Bytes {
    fn from(src: String) -> Bytes {
        // Same path as the `Vec<u8>` conversion: build a `BytesMut` from the
        // string's bytes, then freeze it into an immutable handle.
        BytesMut::from(src).freeze()
    }
}
    
    
impl<'a> From<&'a [u8]> for Bytes {
    fn from(src: &'a [u8]) -> Bytes {
        // Copies the slice into owned storage via `BytesMut`, then freezes.
        BytesMut::from(src).freeze()
    }
}
    
    
    Carl Lerche's avatar
    Carl Lerche committed
impl<'a> From<&'a str> for Bytes {
    fn from(src: &'a str) -> Bytes {
        // Copies the string's UTF-8 bytes into owned storage, then freezes.
        BytesMut::from(src).freeze()
    }
}
    
    
    Jef's avatar
    Jef committed
    impl FromIterator<u8> for BytesMut {
        fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
            let iter = into_iter.into_iter();
            let (min, maybe_max) = iter.size_hint();
    
            let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min));
    
            for i in iter {
    
                out.reserve(1);
    
    Jef's avatar
    Jef committed
                out.put(i);
            }
    
            out
        }
    }
    
impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        // Collect into the mutable variant first, then freeze the result
        // into an immutable handle.
        BytesMut::from_iter(into_iter).freeze()
    }
}
    
    
    impl PartialEq for Bytes {
        fn eq(&self, other: &Bytes) -> bool {
    
    Carl Lerche's avatar
    Carl Lerche committed
            self.inner.as_ref() == other.inner.as_ref()
    
    impl PartialOrd for Bytes {
        fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
            self.inner.as_ref().partial_cmp(other.inner.as_ref())
        }
    }
    
    impl Ord for Bytes {
        fn cmp(&self, other: &Bytes) -> cmp::Ordering {
            self.inner.as_ref().cmp(other.inner.as_ref())
        }
    }
    
    
    Carl Lerche's avatar
    Carl Lerche committed
// Equality is a full byte-wise slice comparison (see `PartialEq`), which is
// reflexive, so the `Eq` marker trait holds.
impl Eq for Bytes {
}
    
    
impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        // An empty `Bytes`; `new` does not allocate.
        Bytes::new()
    }
}
    
    
    impl fmt::Debug for Bytes {
        fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
    
            fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
    
    impl hash::Hash for Bytes {
        fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
            let s: &[u8] = self.as_ref();
            s.hash(state);
        }
    }
    
impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        // Same view as `AsRef<[u8]>`; enables `Bytes` keys to be looked up
        // by `&[u8]` in borrow-aware collections.
        self.as_ref()
    }
}
    
    
impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = Iter<Cursor<Bytes>>;

    fn into_iter(self) -> Self::IntoIter {
        // Consume `self` into a `Cursor`-backed `Buf` and iterate its bytes.
        self.into_buf().iter()
    }
}
    
impl<'a> IntoIterator for &'a Bytes {
    type Item = u8;
    type IntoIter = Iter<Cursor<&'a Bytes>>;

    fn into_iter(self) -> Self::IntoIter {
        // Borrowing form: yields `u8` values without consuming the `Bytes`.
        self.into_buf().iter()
    }
}
    
    
    impl Extend<u8> for Bytes {
        fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
            let iter = iter.into_iter();
    
            let (lower, upper) = iter.size_hint();
    
            // Avoid possible conversion into mut if there's nothing to add