Commit 4823338d authored by Nadav Har'El

Fix memory leaks in mmap/munmap

mmap/munmap and their cousins mmu::map_*() used to leak the 48-byte "vma"
object, twice. The idea of separating reserve() and map() was a good one,
but it was implemented incorrectly, causing a vma object to be allocated
twice per mmap, and both copies were leaked because evacuate() never freed
them.

So I switched to a simpler implementation, where the "vma" object is internal
to mmu.cc and not used by any of the callers. The struct vma is now allocated
exactly once per mmap() and freed on munmap().
parent 8e73271f
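For reference, the flow this commit removes allocated two vma objects per
mmap() and freed neither. Condensed from the old code in the diff below
(perm stands for the computed protection bits):

    std::unique_ptr<mmu::vma> v;
    bool evacuate = true;
    if (!(flags & MAP_FIXED)) {
        v.reset(mmu::reserve(addr, length)); // vma #1, new'd inside reserve()
        addr = v->addr();
        evacuate = false;
    }
    mmu::map_anon(addr, length, perm, evacuate); // vma #2, new'd inside allocate()
    v.release(); // vma #1 deliberately left alive, but nothing ever deletes
                 // it; and munmap()'s evacuate() erased vma #2 from the
                 // intrusive list without deleting it either, so every
                 // mmap() leaked two 48-byte objects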
@@ -192,10 +192,10 @@ void file::load_segment(const Elf64_Phdr& phdr)
     ulong filesz_unaligned = phdr.p_vaddr + phdr.p_filesz - vstart;
     ulong filesz = align_up(filesz_unaligned, page_size);
     ulong memsz = align_up(phdr.p_vaddr + phdr.p_memsz, page_size) - vstart;
-    mmu::map_file(_base + vstart, filesz, mmu::perm_rwx,
-                  *_f, align_down(phdr.p_offset, page_size), true);
+    mmu::map_file(_base + vstart, filesz, false, mmu::perm_rwx,
+                  *_f, align_down(phdr.p_offset, page_size));
     memset(_base + vstart + filesz_unaligned, 0, filesz - filesz_unaligned);
-    mmu::map_anon(_base + vstart + filesz, memsz - filesz, mmu::perm_rwx, true);
+    mmu::map_anon(_base + vstart + filesz, memsz - filesz, false, mmu::perm_rwx);
 }

 void object::load_segments()
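The alignment arithmetic in load_segment() is easier to follow with concrete
numbers. A worked example, assuming page_size = 0x1000 and
vstart = align_down(phdr.p_vaddr, page_size) as in the surrounding code
(the values are illustrative):

    //   p_vaddr = 0x1234, p_filesz = 0x2000, p_memsz = 0x5000
    //   vstart           = 0x1000
    //   filesz_unaligned = 0x1234 + 0x2000 - 0x1000           = 0x2234
    //   filesz           = align_up(0x2234, 0x1000)           = 0x3000
    //   memsz            = align_up(0x1234 + 0x5000, 0x1000) - 0x1000 = 0x6000
    // map_file() covers [vstart, vstart+0x3000); memset() zeroes the 0xdcc
    // bytes of slack in its last page; map_anon() supplies the remaining
    // [vstart+0x3000, vstart+0x6000) as zero-filled memory for the BSS.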
@@ -432,47 +432,33 @@ uintptr_t find_hole(uintptr_t start, uintptr_t size)
     abort();
 }

-bool contains(vma& x, vma& y)
+bool contains(uintptr_t start, uintptr_t end, vma& y)
 {
-    return y.start() >= x.start() && y.end() <= x.end();
+    return y.start() >= start && y.end() <= end;
 }

-void evacuate(vma* v)
+void evacuate(uintptr_t start, uintptr_t end)
 {
     std::lock_guard<mutex> guard(vma_list_mutex);
     // FIXME: use equal_range or something
     for (auto i = std::next(vma_list.begin());
             i != std::prev(vma_list.end());
             ++i) {
-        i->split(v->end());
-        i->split(v->start());
-        if (contains(*v, *i)) {
+        i->split(end);
+        i->split(start);
+        if (contains(start, end, *i)) {
             auto& dead = *i--;
             unpopulate().operate(dead);
             vma_list.erase(dead);
+            delete &dead;
         }
     }
 }

-vma* reserve(void* hint, size_t size)
-{
-    std::lock_guard<mutex> guard(vma_list_mutex);
-    // look for a hole around 'hint'
-    auto start = reinterpret_cast<uintptr_t>(hint);
-    if (!start) {
-        start = 0x200000000000ul;
-    }
-    start = find_hole(start, size);
-    auto v = new vma(start, start + size);
-    vma_list.insert(*v);
-    return v;
-}
-
 void unmap(void* addr, size_t size)
 {
     auto start = reinterpret_cast<uintptr_t>(addr);
-    vma tmp { start, start+size };
-    evacuate(&tmp);
+    evacuate(start, start+size);
 }

 struct fill_anon_page : fill_page {
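The added delete &dead; is the heart of the fix on the munmap() side:
vma_list is a boost::intrusive set, and intrusive containers never own their
elements, so erase() only unlinks a node and never frees it. A minimal
standalone sketch of that behaviour (not OSv code; it uses a base hook where
vma uses a member hook):

    #include <boost/intrusive/set.hpp>

    struct node : public boost::intrusive::set_base_hook<> {
        int key;
        explicit node(int k) : key(k) {}
        friend bool operator<(const node& a, const node& b) { return a.key < b.key; }
    };

    int main()
    {
        boost::intrusive::set<node> s;
        node* n = new node(7);       // the caller allocates...
        s.insert(*n);                // ...the set merely links the object
        s.erase(s.iterator_to(*n));  // unlinks it, does NOT call delete
        delete n;                    // without this, the node leaks
    }

boost::intrusive also offers erase_and_dispose() to unlink and free in one
step; evacuate() uses an explicit delete instead.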
@@ -498,34 +484,43 @@ struct fill_file_page : fill_page {
     uint64_t len;
 };

-vma* allocate(uintptr_t start, uintptr_t end, fill_page& fill,
-              unsigned perm, bool evac)
+uintptr_t allocate(uintptr_t start, size_t size, bool search,
+                   fill_page& fill, unsigned perm)
 {
     std::lock_guard<mutex> guard(vma_list_mutex);
-    vma* ret = new vma(start, end);
-    if (evac)
-        evacuate(ret);
-    vma_list.insert(*ret);
-    populate(&fill, perm).operate((void*)start, end-start);
-    return ret;
+    if (search) {
+        // search for unallocated hole around start
+        if (!start) {
+            start = 0x200000000000ul;
+        }
+        start = find_hole(start, size);
+    } else {
+        // we don't know if the given range is free, need to evacuate it first
+        evacuate(start, start+size);
+    }
+
+    vma* v = new vma(start, start+size);
+    vma_list.insert(*v);
+
+    populate(&fill, perm).operate((void*)start, size);
+
+    return start;
 }

-vma* map_anon(void* addr, size_t size, unsigned perm, bool evac)
+void* map_anon(void* addr, size_t size, bool search, unsigned perm)
 {
     auto start = reinterpret_cast<uintptr_t>(addr);
     fill_anon_page zfill;
-    return allocate(start, start + size, zfill, perm, evac);
+    return (void*) allocate(start, size, search, zfill, perm);
 }

-vma* map_file(void* addr, size_t size, unsigned perm,
-              file& f, f_offset offset, bool evac)
+void* map_file(void* addr, size_t size, bool search, unsigned perm,
+               file& f, f_offset offset)
 {
     auto start = reinterpret_cast<uintptr_t>(addr);
     fill_file_page ffill(f, offset, f.size());
-    vma* ret = allocate(start, start + size, ffill, perm, evac);
-    return ret;
+    return (void*) allocate(start, size, search, ffill, perm);
 }
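With the old reserve() folded into allocate(), callers pick between the two
behaviours with the single search flag. Roughly (the calls below are
illustrative, using the names from the diff):

    // search == true: existing mappings are ignored; find_hole() picks a
    // free range (starting the search at 0x200000000000 when addr is null)
    // and the chosen start address is returned.
    void* picked = mmu::map_anon(nullptr, 16 * 4096, true, mmu::perm_rw);

    // search == false: the caller insists on [addr, addr+size), so the range
    // is evacuated first -- overlapping vmas are split, unpopulated, erased
    // and now also deleted.
    void* fixed = mmu::map_anon(picked, 4096, false, mmu::perm_rw);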
@@ -35,10 +35,9 @@ public:
     boost::intrusive::set_member_hook<> _vma_list_hook;
 };

-vma* reserve(void* hint, size_t size);
-vma* map_file(void* addr, size_t size, unsigned perm,
-              file& file, f_offset offset, bool evac);
-vma* map_anon(void* addr, size_t size, unsigned perm, bool evac);
+void* map_file(void* addr, size_t size, bool search, unsigned perm,
+               file& file, f_offset offset);
+void* map_anon(void* addr, size_t size, bool search, unsigned perm);
 void unmap(void* addr, size_t size);

 int protect(void *addr, size_t size, unsigned int perm);
@@ -48,21 +48,15 @@ void *mmap(void *addr, size_t length, int prot, int flags,
     // make use the payload isn't remapping physical memory
     assert(reinterpret_cast<long>(addr) >= 0);

-    std::unique_ptr<mmu::vma> v;
-    bool evacuate = true;
-    if (!(flags & MAP_FIXED)) {
-        v.reset(mmu::reserve(addr, length));
-        addr = v->addr();
-        evacuate = false; // New address range, no previous mapping to evacuate.
-    }
     if (fd == -1) {
-        mmu::map_anon(addr, length, libc_prot_to_perm(prot), evacuate);
+        return mmu::map_anon(addr, length, !(flags & MAP_FIXED),
+                             libc_prot_to_perm(prot));
     } else {
         file f(fd);
-        mmu::map_file(addr, length, libc_prot_to_perm(prot), f, offset, evacuate);
+        return mmu::map_file(addr, length, !(flags & MAP_FIXED),
+                             libc_prot_to_perm(prot), f, offset);
     }
-    v.release();
-    return addr;
 }

 extern "C" void *mmap64(void *addr, size_t length, int prot, int flags,
@@ -70,15 +70,13 @@ namespace pthread_private {
 sched::thread::stack_info pthread::allocate_stack()
 {
     size_t size = 1024*1024;
-    auto vma = mmu::reserve(nullptr, size);
-    mmu::map_anon(vma->addr(), vma->size(), mmu::perm_rw, false);
-    return { vma->addr(), vma->size() };
+    void *addr = mmu::map_anon(nullptr, size, true, mmu::perm_rw);
+    return { addr, size };
 }

 void pthread::free_stack(sched::thread::stack_info si)
 {
     mmu::unmap(si.begin, si.size);
-    // FIXME: free vma?
 }

 int pthread::join(void*& retval)
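The stack's vma now lives and dies entirely inside mmu.cc, which also
resolves the removed "FIXME: free vma?" comment: unmap() ends in evacuate(),
which deletes the vma. The lifecycle in outline (names from the diff):

    // allocate_stack(): exactly one vma is created inside map_anon().
    void* addr = mmu::map_anon(nullptr, 1024*1024, true, mmu::perm_rw);

    // free_stack(): unmap() -> evacuate() unpopulates the range, erases the
    // vma from vma_list and deletes it -- nothing leaks.
    mmu::unmap(addr, 1024*1024);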