diff --git a/core/mmu.cc b/core/mmu.cc
index 9570d75c90cb5d06aa8dbe4aad7e4c447c2dbc33..3590bee13df18ccc02aac823ed56c8c0e0e57f2e 100644
--- a/core/mmu.cc
+++ b/core/mmu.cc
@@ -260,7 +260,6 @@ void unpopulate_page(void* addr)
             // it is ok to free pieces of a alloc_huge_page() with free_page()
             split_large_page(ptep, level);
         }
-        assert(!pte_large(*ptep));
         pte = *ptep;
         --level;
         pt = phys_cast<pt_element>(pte_phys(pte));
@@ -435,6 +434,110 @@ vma* map_file(void* addr, size_t size, unsigned perm,
     return ret;
 }
 
+void change_perm(pt_element *ptep, unsigned int perm)
+{
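+    // Bit 1 (0x2) of an x86-64 page-table entry is the writable bit;
+    // bit 63 (0x8000000000000000) is the NX (no-execute) bit.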
+    if (perm & perm_write)
+        *ptep |= 0x2;
+    else
+        *ptep &= ~0x2;
+
+    if (!(perm & perm_exec))
+        *ptep |= pt_element(0x8000000000000000);
+    else
+        *ptep &= ~pt_element(0x8000000000000000);
+
+    // TODO: we ignore perm & perm_read here, breaking mprotect()'s
+    // ability to set PROT_NONE, i.e., inaccessible memory.
+    // We could have zeroed the present bit in that case, but the
+    // problem is that a cleared present bit also tells us (e.g., in
+    // unpopulate()) that the memory is unmapped. So to support
+    // !perm_read, we'll need to change that code.
+}
+
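+// Walk the page table from CR3 down to the 4KB pte mapping addr, splitting
+// any huge page met on the way, and apply the new permissions to that single
+// page. Returns 0 if addr is not mapped, 1 on success.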
+int protect_page(void *addr, unsigned int perm)
+{
+    pt_element pte = processor::read_cr3();
+    auto pt = phys_cast<pt_element>(pte_phys(pte));
+    auto ptep = &pt[pt_index(addr, nlevels - 1)];
+    unsigned level = nlevels - 1;
+    while (level > 0) {
+        if (!pte_present(*ptep))
+            return 0;
+        else if (pte_large(*ptep)) {
+            // We're trying to change the protection of part of a huge page,
+            // so we need to split the huge page into small pages. This is
+            // fine because in our implementation it is ok to free pieces of
+            // an alloc_huge_page() with free_page().
+            split_large_page(ptep, level);
+        }
+        pte = *ptep;
+        --level;
+        pt = phys_cast<pt_element>(pte_phys(pte));
+        ptep = &pt[pt_index(addr, level)];
+    }
+    if (!pte_present(*ptep))
+        return 0;
+    change_perm(ptep, perm);
+    return 1;
+}
+
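+// Apply the new permissions to one huge-page-aligned, huge-page-sized region:
+// either a single large pte, or, if the region is mapped with small pages,
+// each of its pte_per_page small ptes. Returns 0 if any part of the region is
+// unmapped (the mapped pages still get the new permissions).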
+int protect_huge_page(void *addr, unsigned int perm)
+{
+    pt_element pte = processor::read_cr3();
+    auto pt = phys_cast<pt_element>(pte_phys(pte));
+    auto ptep = &pt[pt_index(addr, nlevels - 1)];
+    unsigned level = nlevels - 1;
+    while (level > 1) {
+        if (!pte_present(*ptep))
+            return 0;
+        else if (pte_large(*ptep))
+            split_large_page(ptep, level);
+        pte = *ptep;
+        --level;
+        pt = phys_cast<pt_element>(pte_phys(pte));
+        ptep = &pt[pt_index(addr, level)];
+    }
+    if (!pte_present(*ptep))
+        return 0;
+
+    if (pte_large(*ptep)) {
+        change_perm(ptep, perm);
+        return 1;
+    } else {
+        int ret = 1;
+        pt_element* pt = phys_cast<pt_element>(pte_phys(*ptep));
+        for (int i = 0; i < pte_per_page; ++i)
+            if (pte_present(pt[i]))
+                change_perm(&pt[i], perm);
+            else
+                ret = 0;
+        return ret;
+    }
+}
+
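+// Change the permissions of the address range [start, start+size): the
+// unaligned head and tail are handled page by page, and the huge-page-aligned
+// middle ([hp_start, hp_end)) one huge page at a time. Returns 0 if part of
+// the range was not mapped, 1 otherwise; mapped parts are changed either way.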
+int protect(void *start, size_t size, unsigned int perm)
+{
+    void *end = start + size; // one byte past the end of the range
+    void *hp_start = (void*) ((((uintptr_t)start-1) & ~(huge_page_size-1)) +
+            huge_page_size);
+    void *hp_end = (void*) ((uintptr_t)end & ~(huge_page_size-1));
+    if (hp_start > end)
+        hp_start = end;
+    if (hp_end < start)
+        hp_end = start;
+
+    int ret = 1;
+    for (auto addr = start; addr < hp_start; addr += page_size)
+        ret &= protect_page(addr, perm);
+    for (auto addr = hp_start; addr < hp_end; addr += huge_page_size)
+        ret &= protect_huge_page(addr, perm);
+    for (auto addr = hp_end; addr < end; addr += page_size)
+        ret &= protect_page(addr, perm);
+    return ret;
+}
+
 namespace {
 
 uintptr_t align_down(uintptr_t ptr)
diff --git a/include/mmu.hh b/include/mmu.hh
index 4806b90c2e52e5c1dae9aba9211a64c79b31ae0a..96537974b89d4dade90e25c46d3fc67e2f9fbf19 100644
--- a/include/mmu.hh
+++ b/include/mmu.hh
@@ -40,6 +40,7 @@ vma* map_file(void* addr, size_t size, unsigned perm,
               file& file, f_offset offset);
 vma* map_anon(void* addr, size_t size, unsigned perm);
 void unmap(void* addr, size_t size);
+int protect(void *addr, size_t size, unsigned int perm);
 
 typedef uint64_t phys;
 phys virt_to_phys(void *virt);
diff --git a/libc/mman.cc b/libc/mman.cc
index 2a3bccaaeeffc088de16bfe5f2af0c5c2f992485..7cf68800c443c641e18ad4fe93870692fee914a2 100644
--- a/libc/mman.cc
+++ b/libc/mman.cc
@@ -20,7 +20,21 @@ unsigned libc_prot_to_perm(int prot)
 
 int mprotect(void *addr, size_t len, int prot)
 {
-    debug("stub mprotect()");
+    if (!(prot & PROT_READ)) {
+        // FIXME: we do not currently implement PROT_NONE; see change_perm().
+        debug(fmt("mprotect(%x,%d,0x%x) - PROT_NONE unimplemented, using PROT_READ\n")
+                % addr % len % prot, false);
+    }
+    if ((reinterpret_cast<intptr_t>(addr) & 4095) || (len & 4095)) {
+        // addr must be page aligned, and we also require len to be a
+        // multiple of the page size
+        errno = EINVAL;
+        return -1;
+    }
+    if (!mmu::protect(addr, len, libc_prot_to_perm(prot))) {
+        // NOTE: we return ENOMEM when part of the range was not mapped,
+        // but we nevertheless set the protection on the rest of it!
+        errno = ENOMEM;
+        return -1;
+    }
     return 0;
 }
 
diff --git a/tests/tst-mmap.hh b/tests/tst-mmap.hh
index 546f78480b3852fa44fc4b109accc096b00d8740..b52892551082078d8c72a618c1ff6e550fc60363 100644
--- a/tests/tst-mmap.hh
+++ b/tests/tst-mmap.hh
@@ -38,6 +38,28 @@ public:
         }
         munmap(buf, hugepagesize*9+4096);
 
+        // Test mprotect(). Fault-causing tests are commented out until I
+        // write a framework for verifying these faults.
+        buf = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+//        debug("testing write ok");
+//        *(char*)buf = 0;
+//        debug("testing write failure");
+        mprotect(buf, 4096, PROT_READ);
+//        *(char*)buf = 0;
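+        // A small extra check, sketched on the assumption that debug() takes
+        // a plain string (as in the commented-out lines above): mprotect() on
+        // a non-page-aligned address should fail, per the EINVAL test added
+        // in libc/mman.cc.
+        if (mprotect((char*)buf + 1, 4096, PROT_READ) != -1)
+            debug("mprotect() should reject a non-page-aligned address");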
+        munmap(buf, 4096);
+        // test mprotect with part of huge page
+        buf = mmap(NULL, 3*hugepagesize, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+        void *hp = (void*) (((uintptr_t)buf & ~(hugepagesize-1)) + hugepagesize);
+        mprotect(hp+4096, 4096, PROT_READ);
+//        debug("should be fine");
+//        *(char*)hp = 0; // should be fine
+//        debug("should be fine");
+//        *(char*)(hp+8192) = 0; // should be fine
+//        debug("should croak");
+//        *(char*)(hp+4096) = 0; // should croak
+        munmap(buf, 3*hugepagesize);
+
         // TODO: verify that mmapping more than available physical memory doesn't
         // panic just return -1 and ENOMEM.
         // TODO: verify that huge-page-sized allocations get a huge-page aligned address