diff --git a/src/alloc.rs b/src/alloc.rs
index 9171c3804e7986e2df658835f3cd2d178e967c63..27336da2a324615642872fa40ec369920c80b016 100644
--- a/src/alloc.rs
+++ b/src/alloc.rs
@@ -1,5 +1,5 @@
 use std::{mem, ptr};
-use std::rt::heap;
+use std::ops::DerefMut;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::usize;
 
@@ -52,13 +52,9 @@ impl MemRef {
     }
 
     pub fn bytes(&self) -> &[u8] {
-        use std::raw::Slice;
-
+        use std::slice;
         unsafe {
-            mem::transmute(Slice {
-                data: self.ptr(),
-                len: self.mem().len,
-            })
+            slice::from_raw_parts(self.ptr(), self.mem().len)
         }
     }
 
@@ -139,33 +135,39 @@ impl Heap {
             return MemRef::none();
         }
 
-        let alloc_len = len + mem::size_of::<Mem>();
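+        // Layout: [Vec<u8> header][Mem header][payload]. The Vec header is
+        // stashed at the front so `deallocate` can later recover and drop it.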
+        let alloc_len = len +
+            mem::size_of::<Mem>() +
+            mem::size_of::<Vec<u8>>();
 
         unsafe {
-            // Attempt to allocate the memory
-            let ptr: *mut Mem = mem::transmute(
-                heap::allocate(alloc_len, mem::min_align_of::<u8>()));
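+            // Allocate through a Vec instead of the unstable std::rt::heap API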
+            let mut vec: Vec<u8> = Vec::with_capacity(alloc_len);
+            vec.set_len(alloc_len);
 
-            // If failed, return None
-            if ptr.is_null() {
-                return MemRef::none();
-            }
+            let ptr = vec.deref_mut().as_mut_ptr();
 
-            // Write the mem header
-            ptr::write(ptr, Mem::new(len, mem::transmute(self as &Allocator)));
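+            // Move the Vec header into the front of its own buffer; the Vec is
+            // not dropped here, so the allocation stays alive.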
+            ptr::write(ptr as *mut Vec<u8>, vec);
+
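+            // Step past the Vec header and write the Mem header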
+            let ptr = ptr.offset(mem::size_of::<Vec<u8>>() as isize);
+            ptr::write(ptr as *mut Mem, Mem::new(len, mem::transmute(self as &Allocator)));
 
             // Return the info
-            MemRef::new(ptr)
+            MemRef::new(ptr as *mut Mem)
         }
     }
 
     pub fn deallocate(&self, mem: *mut Mem) {
         unsafe {
-            let m: &Mem = mem::transmute(mem);
-
-            heap::deallocate(
-                mem as *mut u8, m.len + mem::size_of::<Mem>(),
-                mem::min_align_of::<u8>())
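+            // Recover the Vec header stored just before the Mem header;
+            // dropping it releases the entire allocation.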
+            let ptr = mem as *mut u8;
+            let vec_ptr = ptr.offset(-(mem::size_of::<Vec<u8>>() as isize));
+            let _ = ptr::read(vec_ptr as *const Vec<u8>);
         }
     }
 }
diff --git a/src/byte_str.rs b/src/byte_str.rs
index 341b5b222260a22b5b7e5e6b41f6e72947941289..886ecc99e59b3ec4e15b3eb9121ea11dcf4e61af 100644
--- a/src/byte_str.rs
+++ b/src/byte_str.rs
@@ -135,8 +135,7 @@ impl SmallByteStr {
     }
 
     pub fn from_slice(bytes: &[u8]) -> Option<SmallByteStr> {
-        use std::mem;
-        use std::slice::bytes;
+        use std::{mem, ptr};
 
         if bytes.len() > MAX_LEN {
             return None;
@@ -148,7 +147,14 @@
         };
 
         // Copy the memory
-        bytes::copy_memory(bytes, &mut ret.bytes);
+        unsafe {
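+            // Safe: `bytes.len()` was checked against MAX_LEN above, and the
+            // source slice cannot overlap the freshly created `ret.bytes`.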
+            ptr::copy_nonoverlapping(
+                bytes.as_ptr(),
+                ret.bytes.as_mut_ptr(),
+                bytes.len());
+        }
 
         Some(ret)
     }
diff --git a/src/bytes.rs b/src/bytes.rs
index de12867a96f83d09631705c576facc72a6472075..cca6f8c324d95cdb810a3cfaf7564c3535517fab 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -2,15 +2,15 @@ use {ByteBuf, SmallByteStr};
 use traits::{Buf, ByteStr, ToBytes};
 use std::{fmt, mem, ops, ptr};
 use std::any::{Any, TypeId};
-use std::marker::Reflect;
-use std::raw::TraitObject;
-use core::nonzero::NonZero;
 
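+// Small `ByteStr` values are stored inline in the `data` field. Vtable
+// pointers are word-aligned, so their low bit is always zero and can be
+// borrowed as the inline flag.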
 const INLINE: usize = 1;
 
 /// A specialized `ByteStr` box.
 pub struct Bytes {
-    vtable: NonZero<usize>,
+    vtable: usize,
     data: *mut (),
 }
 
@@ -40,7 +37,7 @@ impl Bytes {
                 mem::forget(bytes);
 
                 Bytes {
-                    vtable: NonZero::new(vtable as usize | INLINE),
+                    vtable: vtable as usize | INLINE,
                     data: data,
                 }
             } else {
@@ -48,7 +45,7 @@ impl Bytes {
                 let obj: TraitObject = mem::transmute(obj);
 
                 Bytes {
-                    vtable: NonZero::new(obj.vtable as usize),
+                    vtable: obj.vtable as usize,
                     data: obj.data,
                 }
             }
@@ -77,7 +74,7 @@ impl Bytes {
 
     /// If the underlying `ByteStr` is of type `B`, returns the unwrapped value,
     /// otherwise, returns the original `Bytes` as `Err`.
-    pub fn try_unwrap<B: ByteStr + Reflect>(self) -> Result<B, Bytes> {
+    pub fn try_unwrap<B: ByteStr>(self) -> Result<B, Bytes> {
         if TypeId::of::<B>() == self.obj().get_type_id() {
             unsafe {
                 // Underlying ByteStr value is of the correct type. Unwrap it
@@ -103,12 +100,12 @@ impl Bytes {
             let obj = if self.is_inline() {
                 TraitObject {
                     data: mem::transmute(&self.data),
-                    vtable: mem::transmute(*self.vtable - 1),
+                    vtable: mem::transmute(self.vtable - 1),
                 }
             } else {
                 TraitObject {
                     data: self.data,
-                    vtable: mem::transmute(*self.vtable),
+                    vtable: mem::transmute(self.vtable),
                 }
             };
 
@@ -121,12 +118,12 @@ impl Bytes {
     }
 
     fn is_inline(&self) -> bool {
-        (*self.vtable & INLINE) == INLINE
+        (self.vtable & INLINE) == INLINE
     }
 }
 
 fn inline<B: ByteStr>() -> bool {
-    mem::size_of::<B>() <= mem::size_of::<usize>()
+    mem::size_of::<B>() <= 2 * mem::size_of::<usize>()
 }
 
 impl ByteStr for Bytes {
@@ -239,7 +236,7 @@ impl<B: ByteStr> ByteStrPriv for B {
     }
 
     fn get_type_id(&self) -> TypeId {
-        Any::get_type_id(self)
+        TypeId::of::<B>()
     }
 
     fn index(&self, index: usize) -> &u8 {
@@ -259,11 +256,22 @@
     }
 }
 
+// TODO: Figure out how to not depend on the memory layout of trait objects
+// Blocked: rust-lang/rust#24050
+// repr(C) keeps the field order fixed so the transmutes above see the same
+// (data, vtable) layout as the compiler's trait-object representation.
+#[repr(C)]
+struct TraitObject {
+    data: *mut (),
+    vtable: *mut (),
+}
+
 #[test]
 pub fn test_size_of() {
     // TODO: One day, there shouldn't be a drop flag
-    let expect = mem::size_of::<usize>() * 3;
+    let ptr_size = mem::size_of::<usize>();
+    let expect = ptr_size * 3;
 
     assert_eq!(expect, mem::size_of::<Bytes>());
-    assert_eq!(expect, mem::size_of::<Option<Bytes>>());
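+    // Without NonZero, `Option<Bytes>` loses the null-pointer optimization and
+    // needs an extra word for its discriminant.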
+    assert_eq!(expect + ptr_size, mem::size_of::<Option<Bytes>>());
 }
diff --git a/src/lib.rs b/src/lib.rs
index 98336f734e8eb69621374669267f4792abfa145c..1d66fd65915869ca5551d6753d7def7e0fa68148 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,8 +1,6 @@
 #![crate_name = "bytes"]
 #![unstable]
 
-#![feature(alloc, convert, core)]
-
 pub use byte_buf::{ByteBuf, ROByteBuf, MutByteBuf};
 pub use byte_str::{SeqByteStr, SmallByteStr, SmallByteStrBuf};
 pub use bytes::Bytes;
@@ -11,9 +9,7 @@ pub use rope::{Rope, RopeBuf};
 pub use slice::{SliceBuf, MutSliceBuf};
 
 use std::{cmp, fmt, io, ops, ptr, u32};
-use std::marker::Reflect;
-
-extern crate core;
+use std::any::Any;
 
 mod alloc;
 mod byte_buf;
@@ -201,7 +197,7 @@ pub trait MutBufExt {
 /// An immutable sequence of bytes. Operations will not mutate the original
 /// value. Since only immutable access is permitted, operations do not require
 /// copying (though sometimes copying will happen as an optimization).
-pub trait ByteStr : Clone + Sized + Send + Sync + Reflect + ToBytes + ops::Index<usize, Output=u8> + 'static {
+pub trait ByteStr : Clone + Sized + Send + Sync + Any + ToBytes + ops::Index<usize, Output=u8> + 'static {
 
     // Until HKT lands, the buf must be bound by 'static
     type Buf: Buf+'static;
diff --git a/src/ring.rs b/src/ring.rs
index 518ba8a4971dc6ddf8f214c03caa8e46433f8926..20bab4ac13c59f93589168f997793ac3b14edcfd 100644
--- a/src/ring.rs
+++ b/src/ring.rs
@@ -1,16 +1,14 @@
-use super::{Buf, MutBuf};
-use std::{cmp, fmt, mem, ptr, slice};
-use std::rt::heap;
-use std::io;
+use {alloc, Buf, MutBuf};
+use std::{cmp, fmt, io, ptr};
 
 /// Buf backed by a contiguous chunk of memory. Maintains a read cursor and a
 /// write cursor. When reads and writes reach the end of the allocated buffer,
 /// they wrap around to the start.
 pub struct RingBuf {
-    ptr: *mut u8,  // Pointer to the memory
-    cap: usize,     // Capacity of the buffer
-    pos: usize,     // Offset of read cursor
-    len: usize      // Number of bytes to read
+    ptr: alloc::MemRef,  // Pointer to the memory
+    cap: usize,          // Capacity of the buffer
+    pos: usize,          // Offset of read cursor
+    len: usize           // Number of bytes to read
 }
 
 // TODO: There are most likely many optimizations that can be made
@@ -19,7 +17,7 @@ impl RingBuf {
         // Handle the 0 length buffer case
         if capacity == 0 {
             return RingBuf {
-                ptr: ptr::null_mut(),
+                ptr: alloc::MemRef::none(),
                 cap: 0,
                 pos: 0,
                 len: 0
@@ -29,11 +27,10 @@ impl RingBuf {
         // Round to the next power of 2 for better alignment
         capacity = capacity.next_power_of_two();
 
-        // Allocate the memory
-        let ptr = unsafe { heap::allocate(capacity, mem::min_align_of::<u8>()) };
+        let mem = alloc::HEAP.allocate(capacity);
 
         RingBuf {
-            ptr: ptr as *mut u8,
+            ptr: mem,
             cap: capacity,
             pos: 0,
             len: 0
@@ -75,18 +72,6 @@ impl RingBuf {
         cnt = cmp::min(cnt, self.write_remaining());
         self.len += cnt;
     }
-
-    fn as_slice(&self) -> &[u8] {
-        unsafe {
-            slice::from_raw_parts(self.ptr as *const u8, self.cap)
-        }
-    }
-
-    fn as_mut_slice(&mut self) -> &mut [u8] {
-        unsafe {
-            slice::from_raw_parts_mut(self.ptr, self.cap)
-        }
-    }
 }
 
 impl Clone for RingBuf {
@@ -102,12 +87,13 @@ impl Clone for RingBuf {
             let to = self.pos + self.len;
 
             if to > self.cap {
-                ptr::copy(self.ptr as *const u8, ret.ptr, to % self.cap);
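+                // The readable region wraps past the end of the buffer; copy
+                // the wrapped-around prefix first.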
+                ptr::copy(self.ptr.ptr() as *const u8, ret.ptr.ptr(), to % self.cap);
             }
 
             ptr::copy(
-                self.ptr.offset(self.pos as isize) as *const u8,
-                ret.ptr.offset(self.pos as isize),
+                self.ptr.ptr().offset(self.pos as isize) as *const u8,
+                ret.ptr.ptr().offset(self.pos as isize),
                 cmp::min(self.len, self.cap - self.pos));
         }
 
@@ -124,16 +109,6 @@ impl fmt::Debug for RingBuf {
     }
 }
 
-impl Drop for RingBuf {
-    fn drop(&mut self) {
-        if self.cap > 0 {
-            unsafe {
-                heap::deallocate(self.ptr, self.cap, mem::min_align_of::<u8>())
-            }
-        }
-    }
-}
-
 impl Buf for RingBuf {
 
     fn remaining(&self) -> usize {
@@ -147,7 +122,7 @@ impl Buf for RingBuf {
             to = self.cap
         }
 
-        &self.as_slice()[self.pos .. to]
+        &self.ptr.bytes()[self.pos .. to]
     }
 
     fn advance(&mut self, cnt: usize) {
@@ -167,7 +142,7 @@ impl MutBuf for RingBuf {
 
     fn mut_bytes(&mut self) -> &mut [u8] {
         if self.cap == 0 {
-            return self.as_mut_slice();
+            return self.ptr.bytes_mut();
         }
         let mut from;
         let mut to;
@@ -181,7 +156,7 @@ impl MutBuf for RingBuf {
             to = self.cap;
         }
 
-        &mut self.as_mut_slice()[from..to]
+        &mut self.ptr.bytes_mut()[from..to]
     }
 }
 
diff --git a/test/test.rs b/test/test.rs
index a497869dc317c95092cf3c4733ca75323f52bde5..94dce0b1915a2a6b9c524f5bbc72515cfbbfeae9 100644
--- a/test/test.rs
+++ b/test/test.rs
@@ -1,5 +1,3 @@
-#![feature(core)]
-
 use rand::random;
 
 extern crate bytes;
diff --git a/test/test_buf_fill.rs b/test/test_buf_fill.rs
index f040ea1fc19cd262f1ad329b37556c26a21bba95..d32a1ed58a9bee73d175789f708a0955c1a3e599 100644
--- a/test/test_buf_fill.rs
+++ b/test/test_buf_fill.rs
@@ -20,8 +20,7 @@ struct Chunked {
 
 impl io::Read for Chunked {
     fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
-        use std::cmp;
-        use std::slice::bytes;
+        use std::{cmp, ptr};
 
         if self.chunks.is_empty() {
             return Ok(0);
@@ -30,9 +29,15 @@ impl io::Read for Chunked {
         let src = self.chunks[0];
         let len = cmp::min(src.len(), dst.len());
 
-        bytes::copy_memory(
-            &src[..len],
-            &mut dst[..len]);
+        unsafe {
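+            // Safe: `len` is the minimum of the two lengths, and the chunk and
+            // destination buffers never overlap.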
+            ptr::copy_nonoverlapping(
+                src[..len].as_ptr(),
+                dst[..len].as_mut_ptr(),
+                len);
+        }
 
         if len < src.len() {
             self.chunks[0] = &src[len..];