From 850bf8935fdc43cb989bf39b50916208297e0f06 Mon Sep 17 00:00:00 2001
From: Tomasz Grabiec <tgrabiec@cloudius-systems.com>
Date: Mon, 19 May 2014 15:46:48 +0200
Subject: [PATCH] mempool: erase page range using an iterator rather than a reference

free_page_ranges is an intrusive set. Erasing via a reference requires
an equal_range() lookup of the element under the hood, which means
traversing the tree down to the leaves, whereas erasing via an iterator
needs no such lookup, so it should be faster.

Signed-off-by: Tomasz Grabiec <tgrabiec@cloudius-systems.com>
---
 core/mempool.cc | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/core/mempool.cc b/core/mempool.cc
index 7d85a0b7d..6153c2a26 100644
--- a/core/mempool.cc
+++ b/core/mempool.cc
@@ -879,7 +879,7 @@ static void refill_page_buffer()
         total_size += size;
         void* pages = static_cast<void*>(p) + p->size;
         if (!p->size) {
-            free_page_ranges.erase(*p);
+            free_page_ranges.erase(it);
         }
         while (size) {
             pbuf.free[pbuf.nr++] = pages;
@@ -941,12 +941,13 @@ static void* early_alloc_page()
         abort();
     }
 
-    auto p = &*free_page_ranges.begin();
+    auto begin = free_page_ranges.begin();
+    auto p = &*begin;
     p->size -= page_size;
     on_alloc(page_size);
     void* page = static_cast<void*>(p) + p->size;
     if (!p->size) {
-        free_page_ranges.erase(*p);
+        free_page_ranges.erase(begin);
     }
     return page;
 }
@@ -1023,7 +1024,7 @@ void* alloc_huge_page(size_t N)
     size_t alloc_size;
     if (ret==v) {
         alloc_size = range->size;
-        free_page_ranges.erase(*range);
+        free_page_ranges.erase(i);
    } else {
        // Note that this is is done conditionally because we are
        // operating page ranges. That is what is left on our page
--
GitLab
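
For reference, below is a minimal standalone sketch of the distinction the
commit message describes, using boost::intrusive directly. The type and
variable names (range_node, ranges) are simplified stand-ins, not the actual
definitions from core/mempool.cc:

    #include <boost/intrusive/set.hpp>
    #include <cassert>

    namespace bi = boost::intrusive;

    // Simplified stand-in for the page-range node type; the real struct
    // in core/mempool.cc carries more state.
    struct range_node : bi::set_base_hook<> {
        size_t size;
        explicit range_node(size_t s) : size(s) {}
        bool operator<(const range_node& other) const {
            return size < other.size;
        }
    };

    int main()
    {
        bi::set<range_node> ranges;   // plays the role of free_page_ranges
        range_node a(4096), b(8192);
        ranges.insert(a);
        ranges.insert(b);

        auto it = ranges.begin();
        ranges.erase(it);             // iterator form: unlinks the node directly

        ranges.erase(b);              // reference form: must first locate the node
                                      // with an equal_range()-style tree search
        assert(ranges.empty());
        return 0;
    }

The patch switches the three call sites where an iterator (or begin()) is
already at hand to the iterator overload, so the extra tree search done by the
reference overload is avoided.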