diff --git a/.gitignore b/.gitignore
index 590f83ab14c49a5a41eae44f0c81cf8d8fe886e3..58eae60023b9f2e2e18e81d6f2a940735a0d68a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -103,3 +103,4 @@ tests/check_events
 tests/check_subscribe_local
 tests/subscribe-archiver.c
 tests/check_cdo_selectors
+/tests/check_memlock
diff --git a/include/Makefile.am b/include/Makefile.am
index dd8e6401c9cf717e2904bcfd82cc1f0dbf999405..0714a2256ce4bbff26fb3566c4b2f86b576f05b7 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -81,6 +81,7 @@ noinst_HEADERS = \
 	maestro/i_khash.h      \
 	maestro/i_ketopt.h     \
 	maestro/i_hashing.h    \
+	maestro/i_memlock.h    \
 	maestro/i_misc.h
 
 
diff --git a/include/maestro/i_memlock.h b/include/maestro/i_memlock.h
new file mode 100644
index 0000000000000000000000000000000000000000..201d5a209d54c48934bdafde581fe81ecfa0c8b7
--- /dev/null
+++ b/include/maestro/i_memlock.h
@@ -0,0 +1,95 @@
+/* -*- mode:c -*- */
+/** @file
+ ** @brief Maestro memory locking abstraction
+ **/
+/*
+ * Copyright (C) 2020 HPE, HP Schweiz GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MAESTRO_I_MEMLOCK_H_
+#define MAESTRO_I_MEMLOCK_H_ 1
+
+#include "maestro.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <assert.h>
+
+/**@addtogroup MSTRO_Internal
+ **@{
+ **/
+
+/**@defgroup MSTRO_I_MEMLOCK Recursive, overlapping memory locking support
+ **@{
+ **/
+
+/**@brief Lock @arg LEN bytes of memory starting at @arg ADDR
+ *
+ * In contrast to mlock(), this function correctly handles locking
+ * multiple allocations that reside on the same page.
+ *
+ * This function can fail for resource-limit reasons, but such
+ * failures need not be catastrophic for the application overall. If
+ * the caller can tolerate it, they can retry later, or retry with a
+ * smaller @arg len.
+ */
+mstro_status
+mstro_memlock(void* addr, size_t len);
+
+/**@brief Unlock @arg LEN bytes of memory starting at @arg ADDR
+ *
+ * In contrast to munlock(), this function correctly handles multiple
+ * allocations that reside on the same page. It will only unlock
+ * pages that are not also keeping other allocations locked via
+ * @ref mstro_memlock().
+ */
+mstro_status
+mstro_memunlock(void* addr, size_t len);
+
+
+/**@brief Initialize the memlock subsystem
+ *
+ * Checks the resource limits (RLIMIT_MEMLOCK) to verify that at least
+ * @arg min_required bytes can be locked; if not, returns MSTRO_NOMEM.
+ */
+mstro_status
+mstro_memlock_init(size_t min_required);
+
+/**@brief De-Initialize the memlock subsystem
+ *
+ */
+mstro_status
+mstro_memlock_finalize(void);
+
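+/* A minimal usage sketch (illustrative only; error handling abbreviated,
+ * and `buf`/`buflen` stand for a caller-owned buffer and its size):
+ *
+ * @code
+ *   mstro_status s = mstro_memlock_init(4096);
+ *   if(s!=MSTRO_OK)
+ *     return s;                       // e.g. RLIMIT_MEMLOCK too low
+ *
+ *   s = mstro_memlock(buf, buflen);   // pins the pages spanned by buf
+ *   if(s==MSTRO_OK) {
+ *     // ... use buf, e.g. as an RDMA-registered region ...
+ *     s = mstro_memunlock(buf, buflen);
+ *   }
+ *
+ *   s = mstro_memlock_finalize();     // expects all locks to be released
+ * @endcode
+ */
+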
+/**@} (end of group MSTRO_I_MEMLOCK) */
+/**@} (end of group MSTRO_Internal) */
+
+#endif /* MAESTRO_I_MEMLOCK_H_ */
diff --git a/include/maestro/i_misc.h b/include/maestro/i_misc.h
index df83bd4dfd1399f1f27512f433f3aef684869f3a..94fb609ac8d4d3687ff8ed3dc2b37a31069fea3c 100644
--- a/include/maestro/i_misc.h
+++ b/include/maestro/i_misc.h
@@ -56,6 +56,14 @@ popcount(unsigned int v)
 #endif
 
 
+#if defined(__GNUC__) || defined(__clang__)
+/** count trailing (low-order) zero bits */
+#define ctz(x) __builtin_ctz(x)
+#else
+#include <strings.h>
+#define ctz(x) (ffs(x)-1)
+#endif
+
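+/* Example (illustrative): for a 4096-byte page size, ctz(4096) == 12, the
+ * number of in-page offset bits; both branches above agree for any
+ * non-zero argument. */
+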
 /**@} (end of group MSTRO_Internal) */
 
 #endif /* MAESTRO_I_MISC_H_ */
diff --git a/maestro/Makefile.am b/maestro/Makefile.am
index f7d2f4807d8cbb6c41caba8c05d5095748f79378..26b9feedcccab128d44ca5b7f9347932cd1197dd 100644
--- a/maestro/Makefile.am
+++ b/maestro/Makefile.am
@@ -68,6 +68,7 @@ libmaestro_core_la_SOURCES = \
 	i_event.h event.c \
 	cdo_sel_parse.c cdo_sel_parse.h \
 	i_groups.h groups.c \
+	memlock.c \
         cdo-attributes-default.txt 
 
 #	mempool.c 
diff --git a/maestro/core.c b/maestro/core.c
index b49fe4f6a5cdce08e5e14b887d01dd28d21e54f9..83c1fdfb718fb12a33c1711e5a80cf50812c52fc 100644
--- a/maestro/core.c
+++ b/maestro/core.c
@@ -37,6 +37,8 @@
 
 #include "maestro/i_globals.h"
 #include "maestro/i_pool.h"
+#include "maestro/i_memlock.h"
+
 #include "mamba.h"
 
 #include "i_subscription_registry.h"
@@ -139,12 +141,21 @@ BAILOUT:
     return MSTRO_OK;
 }
 
+
+/** minimum mlock() limit */
+#define MSTRO_MIN_MEMLOCK (4*sizeof(g_component_descriptor))
+
 mstro_status
 mstro_core_init(const char *workflow_name,
                 const char *component_name,
                 uint64_t component_index)
 {
   mstro_status status = MSTRO_UNIMPL;
+
+  status = mstro_memlock_init(MSTRO_MIN_MEMLOCK);
+  if(status!=MSTRO_OK) {
+    return status;
+  }
   
   struct mstro_core_initdata *data = malloc(sizeof(struct mstro_core_initdata));
 
@@ -360,7 +371,7 @@ mstro_core_finalize(void)
     goto BAILOUT;
   }
 
-  status = MSTRO_OK;
+  status = mstro_memlock_finalize();
 
 BAILOUT:  
   return status;
diff --git a/maestro/memlock.c b/maestro/memlock.c
new file mode 100644
index 0000000000000000000000000000000000000000..863ec81ad0558850efd86525b0fd9b4317e4f537
--- /dev/null
+++ b/maestro/memlock.c
@@ -0,0 +1,338 @@
+#include "maestro/i_memlock.h"
+#include "maestro/i_misc.h"
+#include "maestro/logging.h"
+#include "maestro/i_hashing.h"
+
+/* simplify logging */
+#define DEBUG(...) LOG_DEBUG(MSTRO_LOG_MODULE_CORE,__VA_ARGS__)
+#define INFO(...)  LOG_INFO(MSTRO_LOG_MODULE_CORE,__VA_ARGS__)
+#define WARN(...)  LOG_WARN(MSTRO_LOG_MODULE_CORE,__VA_ARGS__)
+#define ERR(...)   LOG_ERR(MSTRO_LOG_MODULE_CORE,__VA_ARGS__)
+
+
+#include <inttypes.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+/* Terminology:
+ *
+ * * An allocation is described by its starting address and length.
+ *
+ * * A pageset is given by the address of the starting page and the
+ *   number of pages following it.
+ *
+ * * A risky page ("risky to have another allocation overlapping it")
+ *   is one that is at the start or end of a pageset.
+ *
+ */
+
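+/* Worked example (illustrative numbers, assuming 4096-byte pages): an
+ * allocation at address 0x10f80 with length 0x2100 ends at 0x13080, so its
+ * startpage is 0x10000, its endpage is 0x13000, and it spans 3 extra pages.
+ * The risky pages are 0x10000 and 0x13000 (other allocations may share
+ * them), while the fully covered pages 0x11000 and 0x12000 can be
+ * mlock()ed/munlock()ed as a single inner range. */
+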
+/** the pagesize -- set at call to mstro_memlock_init() */
+static long g_pagesize = 0;
+
+/** number of bits for addresses in a page */
+static size_t g_page_bits = 0;
+
+/** the bits valid in an address that is a valid page start */
+static uintptr_t g_pagify_mask = 0;
+
+
+/** Assign pageset data to STARTPAGE/EXTRAPAGES/ENDPAGE for allocation
+ * (ADDR,len).  EXTRAPAGES is the number of pages other than STARTPAGE
+ * spanned by the allocation */
+#define WITH_PAGESET(startpage,extrapages,endpage,addr,len, body)       \
+  do {                                                                  \
+    startpage  = (void*)(((uintptr_t)addr)       & g_pagify_mask);      \
+    endpage    = (void*)((((uintptr_t)addr)+len) & g_pagify_mask);      \
+    extrapages = (((uintptr_t)endpage) - ((uintptr_t)startpage)) >> g_page_bits; \
+    do {                                                                \
+      body;                                                             \
+    } while(0);                                                         \
+  } while(0)
+
+
+
+/* We optimize for reasonably few pages being locked. That is
+ * reasonable because allocators should be grouping user allocations
+ * into contiguous pages, or people will be using hugepages, of which
+ * there are few anyway. */
+
+/** define a hash table from page address to refcount (size_t) */
+KHASH_INIT(page_set, void *, size_t, 1, kh_voidptr_hash_func, kh_voidptr_hash_equal)
+
+/** the table of locked pages. */
+static khash_t(page_set) *g_locked_pages = NULL;
+
+/** lock protecting g_locked_pages */
+static pthread_mutex_t g_locked_pages_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+#define WITH_LOCKED_PAGETABLE(body,exitlabel) do {                      \
+    int wlpt_stat=pthread_mutex_lock(&g_locked_pages_mtx);              \
+    if(wlpt_stat!=0) {                                                  \
+      ERR("Failed to lock page-set table: %d (%s)\n",                   \
+          wlpt_stat, strerror(wlpt_stat));                              \
+      goto exitlabel;                                                   \
+    }                                                                   \
+                                                                        \
+    do {                                                                \
+      body;                                                             \
+    } while(0);                                                         \
+                                                                        \
+    wlpt_stat=pthread_mutex_unlock(&g_locked_pages_mtx);                \
+    if(wlpt_stat!=0) {                                                  \
+      ERR("Failed to unlock page-set table: %d (%s)\n",                 \
+          wlpt_stat, strerror(wlpt_stat));                              \
+      goto exitlabel;                                                   \
+    }                                                                   \
+  } while(0)
+
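+/* Usage sketch (illustrative): `body` runs with g_locked_pages_mtx held;
+ * `exitlabel` must name a label in the enclosing function that is safe to
+ * jump to if the mutex cannot be taken or released, e.g.
+ *
+ *   WITH_LOCKED_PAGETABLE({ status = mstro_memlock__page_ref(page); },
+ *                         BAILOUT_FAIL);
+ */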
+
+mstro_status
+mstro_memlock_init(size_t min_required)
+{
+  g_pagesize = sysconf(_SC_PAGESIZE);
+  assert(popcount(g_pagesize)==1);
+  g_page_bits = ctz(g_pagesize);
+  g_pagify_mask = ~((uintptr_t)g_pagesize-1); 
+
+  DEBUG("Page size is %ld, %zu bits, mask %" PRIxPTR "\n",
+        g_pagesize, g_page_bits, g_pagify_mask);
+
+  struct rlimit l;
+  int s = getrlimit(RLIMIT_MEMLOCK, &l);
+  DEBUG("RLIMIT_MEMLOCK is %zu (soft), %zu (hard)\n",
+        l.rlim_cur, l.rlim_max);
+  if(l.rlim_cur < min_required) {
+    ERR("RLIMIT_MEMLOCK too small; please set ulimit -l %zu or higher\n",
+        min_required/1024);
+    /* We tried to do setrlimit here, but get a segv on linux (Cray XC)
+     * after it, so ask the user to do it */
+    return MSTRO_NOMEM;
+  }
+
+  g_locked_pages = kh_init(page_set);
+
+  if(g_locked_pages==NULL)
+    return MSTRO_NOMEM;
+  else
+    return MSTRO_OK;
+}
+
+
+mstro_status
+mstro_memlock_finalize(void)
+{
+  mstro_status s=MSTRO_OK;
+
+  WITH_LOCKED_PAGETABLE({
+    if(g_locked_pages) {
+      if(kh_size(g_locked_pages)!=0) {
+        ERR("Table of locked page-sets nonempty: %u live entries\n",
+            kh_size(g_locked_pages));
+        s=MSTRO_FAIL;
+      }
+      kh_destroy(page_set, g_locked_pages);
+      g_locked_pages = NULL;
+    } else {
+      ERR("Called before init, or called twice\n");
+      s=MSTRO_FAIL;
+    }
+  }, BAILOUT);
+BAILOUT:
+  return s;
+}
+
+
+/** check whether @arg page is locked.
+ *
+ * Must be called while holding @ref g_locked_pages_mtx.
+ */
+static inline
+bool
+mstro_memlock__page_locked(void *page)
+{
+  assert(g_pagify_mask == ((uintptr_t)page |g_pagify_mask)); /* check that it's a page address */
+  khiter_t entry_for_page = kh_get(page_set, g_locked_pages, page);
+  if(entry_for_page!=kh_end(g_locked_pages)) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+/** Register one level of locking on @arg page.
+ *
+ * Must be called while holding @ref g_locked_pages_mtx.
+ *
+ * If this is the first entry for @arg page, mlock() it.
+ */
+static inline
+mstro_status
+mstro_memlock__page_ref(void *page)
+{
+  assert(g_pagify_mask == ((uintptr_t)page |g_pagify_mask)); /* check that it's a page address */
+
+  int r=-2;
+
+  khiter_t entry_for_page = kh_put(page_set, g_locked_pages, page, &r);
+  switch(r) {
+    case -1:
+      ERR("Failed to insert %p into page-set table\n", page);
+      return MSTRO_FAIL;
+      break;
+    case 0: /* page known: increase refcount */
+      kh_val(g_locked_pages,entry_for_page) += 1;
+      break;
+    case 1: /* fallthrough */
+    case 2: /* new entry */
+      {
+        kh_val(g_locked_pages,entry_for_page) = 1;
+        int s=mlock(page,g_pagesize);
+        if(s!=0) {
+          ERR("Failed to mlock() page at %p (%ld bytes): %d (%s)\n",
+              page, g_pagesize, errno, strerror(errno));
+          kh_del(page_set, g_locked_pages, entry_for_page);
+          return MSTRO_FAIL;
+        }
+        break;
+      }
+    default:
+      ERR("Unexpected return code from kh_put: %d\n", r);
+      return MSTRO_FAIL;
+      break;
+  }
+
+  DEBUG("lock-page refcount for page %p now at %zu\n", page, kh_val(g_locked_pages, entry_for_page));
+
+  return MSTRO_OK;
+}
+
+/** De-Register one level of locking on @arg page.
+ *
+ * Must be called while holding @ref g_locked_pages_mtx.
+ *
+ * If level drops to 0, munlock() the page.
+ */
+static inline
+mstro_status
+mstro_memlock__page_deref(void *page)
+{
+  assert(g_pagify_mask == ((uintptr_t)page |g_pagify_mask)); /* check that it's a page address */
+
+  khiter_t entry_for_page = kh_get(page_set, g_locked_pages, page);
+  if(entry_for_page==kh_end(g_locked_pages)) {
+    ERR("Tried to deref page %p from page-set table, but it's not present\n",
+        page);
+    return MSTRO_FAIL;
+  }
+
+  size_t refcount = kh_val(g_locked_pages, entry_for_page);
+  assert(refcount>0);
+
+  kh_val(g_locked_pages, entry_for_page) -= 1;
+  refcount--;
+
+  DEBUG("lock-page refcount for page %p now at %zu\n", page, refcount);
+  
+  if(refcount==0) {
+    int s = munlock(page, g_pagesize);
+    if(s!=0) {
+      ERR("Failed to munlock() page at %p: %d (%s)\n", page, errno, strerror(errno));
+      return MSTRO_FAIL;
+    }
+    kh_del(page_set, g_locked_pages, entry_for_page);
+  }
+  
+  return MSTRO_OK;
+}
+
+mstro_status
+mstro_memlock(void* addr, size_t len)
+{
+  assert(g_pagesize!=0); /* users must call mstro_memlock_init() before */
+  mstro_status status = MSTRO_OK;
+  
+  void *startpage,*endpage;
+  size_t extrapages;
+  
+  WITH_PAGESET(startpage,extrapages,endpage, addr, len, {
+      DEBUG("Allocation %p, %zu: page set %p,%p,%zu\n",
+            addr, len, startpage, endpage,extrapages);
+      WITH_LOCKED_PAGETABLE({
+          status = mstro_memlock__page_ref(startpage);
+          if(status==MSTRO_OK) {
+            status = mstro_memlock__page_ref(endpage);
+            if(status!=MSTRO_OK) {
+              mstro_memlock__page_deref(startpage);
+            } else {
+              if(extrapages>1) {
+                /* start and end on different pages, and at least one
+                 * page in between: lock the piece in between */
+                void *range_start = (void*)((uintptr_t)startpage + g_pagesize);
+                size_t range = (extrapages-1) * g_pagesize;
+                int s = mlock(range_start, range);
+                if(s!=0) {
+                  ERR("Failed to lock inner region for %p at %p (%zu bytes) for addr %p (%zu bytes): %d (%s)\n",
+                      startpage, range_start, range, addr, len,
+                      errno, strerror(errno));
+                  status = MSTRO_FAIL;
+                  mstro_memlock__page_deref(startpage);
+                  mstro_memlock__page_deref(endpage);
+                }
+              }
+            }
+          }
+        },
+        BAILOUT_FAIL);
+    });
+
+BAILOUT:
+  return status;
+BAILOUT_FAIL:
+  return MSTRO_FAIL;
+}
+
+mstro_status
+mstro_memunlock(void* addr, size_t len)
+{
+  assert(g_pagesize!=0); /* users must call mstro_memlock_init() before */
+  mstro_status status = MSTRO_OK;
+  
+  void *startpage,*endpage;
+  size_t extrapages;
+  
+  WITH_PAGESET(startpage,extrapages,endpage, addr, len, {
+      DEBUG("Allocation %p, %zu: page set %p,%p,%zu\n",
+            addr, len, startpage, endpage,extrapages);
+      WITH_LOCKED_PAGETABLE({
+          status = mstro_memlock__page_deref(startpage);
+          if(status==MSTRO_OK) {
+            status = mstro_memlock__page_deref(endpage);
+            if(status==MSTRO_OK) {
+              if(extrapages>1) {
+                /* start and end on different pages, and at least one
+                 * page in between: unlock the piece in between */
+                void *range_start = (void*)((uintptr_t)startpage + g_pagesize);
+                size_t range = (extrapages-1) * g_pagesize;
+                int s = munlock(range_start, range);
+                if(s!=0) {
+                  ERR("Failed to unlock inner region for %p at %p (%zu bytes) for addr %p (%zu bytes): %d (%s)\n",
+                      startpage, range_start, range, addr, len,
+                      errno, strerror(errno));
+                  status = MSTRO_FAIL;
+                }
+              }
+            }
+          }
+        },
+        BAILOUT_FAIL);
+    });
+
+BAILOUT:
+  return status;
+BAILOUT_FAIL:
+  return MSTRO_FAIL;
+}
diff --git a/maestro/ofi.c b/maestro/ofi.c
index 8778bc1341f9d2ffa1540f4897bc2eb577186f07..c76a084e2bfd9f1d5aa004487fb436fdc2c61ea9 100644
--- a/maestro/ofi.c
+++ b/maestro/ofi.c
@@ -1670,26 +1670,7 @@ mstro_ofi__order_fi_list(struct fi_info **fi)
   return MSTRO_OK;
 }
 
-/** minimum mlock() limit */
-#define MSTRO_MIN_MEMLOCK (4*sizeof(g_component_descriptor))
 
-static inline
-mstro_status
-mstro__init_memlock()
-{
-    struct rlimit l;
-    int s = getrlimit(RLIMIT_MEMLOCK,&l);
-    DEBUG("RLIMIT_MEMLOCK is %zu (soft), %zu (hard)\n",
-          l.rlim_cur, l.rlim_max);
-    if(l.rlim_cur <MSTRO_MIN_MEMLOCK) {
-	ERR("RLIMIT_MEMLOCK too small; please set ulimit -l %d or higher\n",
-              MSTRO_MIN_MEMLOCK/1024);
-	/* We tried to do setrlimit here, but get a segv on linux (Cray XC)
-	 * after it, so ask the user to do it */
-	return MSTRO_NOMEM;
-    }
-    return MSTRO_OK;
-}
 
 /** Populate @ref g_endpoints with an enabled endpoint for each useful
  * OFI endpoint discovered */
@@ -1703,11 +1684,6 @@ mstro_ofi_init(void)
   struct fi_info *fi = NULL,
                  *hints = NULL;
 
-  /* ensure we can use some RDMA */
-  retstat = mstro__init_memlock();
-  if(retstat!=MSTRO_OK)
-	  return retstat;
-
   /* prepare for DRC */
   /* (only needed on GNI/Cray, but dummy emulation is provided) */
   if(g_drc_info==NULL) {
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 3a9fb1590f5055c0a86389c6ad289581ec961247..8beb8d0608c58da39e00a75edea32c8a8f3700df 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -53,6 +53,7 @@ LDADD = $(top_builddir)/libmaestro.la
 check_HEADERS = cheat.h
 
 TESTS = check_version check_init check_uuid \
+	coverage check_memlock \
 	check_schema_parse \
 	check_cdo_selectors \
 	check_type_parser \
@@ -75,8 +76,7 @@ TESTS = check_version check_init check_uuid \
 	check_pm_declare.sh \
 	check_pm_interlock.sh \
 	check_subscribe.sh \
-	check_pm_declare_group.sh \
-	coverage
+	check_pm_declare_group.sh 
 
 
 XFAIL_TESTS = \
@@ -88,6 +88,7 @@ XFAIL_TESTS = \
 # 	check_mempool 
 
 check_PROGRAMS = check_version check_init check_uuid \
+		 coverage check_memlock \
 		 check_protobuf_c \
 		 check_transport_gfs \
 		 check_layout \
@@ -116,8 +117,7 @@ check_PROGRAMS = check_version check_init check_uuid \
 		 simple_injector \
 		 simple_archiver \
 		 simple_telemetry_listener \
-		 check_events \
-		 coverage
+		 check_events 
 
 
 if WITH_MIO
diff --git a/tests/check_memlock.c b/tests/check_memlock.c
new file mode 100644
index 0000000000000000000000000000000000000000..7a9719eb3c6bfea5fd66284deb69596b4e99b550
--- /dev/null
+++ b/tests/check_memlock.c
@@ -0,0 +1,114 @@
+/* -*- mode:c -*- */
+/** @file
+ ** @brief check mstro_memlock
+ **/
+
+/*
+ * Copyright (C) 2020 HPE, HP Schweiz GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* needed before inclusion of cheat.h: */
+#ifndef __BASE_FILE__
+#define __BASE_FILE__ __FILE__
+#endif
+
+#include "cheat.h"
+
+#include "maestro.h"
+#include "maestro/i_memlock.h"
+#include <unistd.h>
+
+
+CHEAT_TEST(lock_unlock,
+           {
+             cheat_assert(MSTRO_OK==mstro_memlock_init(2*8000));
+             void *x =malloc(8000);
+             cheat_assert(x!=NULL);
+
+             cheat_assert(MSTRO_OK==mstro_memlock(x,8000));
+             cheat_assert(MSTRO_OK==mstro_memunlock(x,8000));
+
+             free(x);
+             cheat_assert(MSTRO_OK==mstro_memlock_finalize());
+           })
+
+
+CHEAT_TEST(lock_unlock_multi,
+           {
+             cheat_assert(MSTRO_OK==mstro_memlock_init(2*8000));
+             void *x =malloc(8000);
+             cheat_assert(x!=NULL);
+
+             cheat_assert(MSTRO_OK==mstro_memlock(x,8000));
+             cheat_assert(MSTRO_OK==mstro_memlock(x,8000));
+             cheat_assert(MSTRO_OK==mstro_memunlock(x,8000));
+             cheat_assert(MSTRO_OK==mstro_memunlock(x,8000));
+
+             cheat_assert(MSTRO_OK==mstro_memlock(x,8000));
+             cheat_assert(MSTRO_OK==mstro_memlock(x,8000));
+             cheat_assert(MSTRO_OK==mstro_memunlock(x,8000));
+             cheat_assert(MSTRO_OK==mstro_memunlock(x,8000));
+
+             cheat_assert(MSTRO_FAIL==mstro_memunlock(x,8000));
+
+             free(x);
+             cheat_assert(MSTRO_OK==mstro_memlock_finalize());
+           })
+
+CHEAT_TEST(lock_overlapping,
+           {
+             long pagesize = sysconf(_SC_PAGESIZE);
+             void *x = malloc(3*pagesize);
+             cheat_assert(x!=NULL);
+             cheat_assert(pagesize>4);
+
+             cheat_assert(MSTRO_OK==mstro_memlock_init(4*pagesize));
+
+             /* one on the first page */
+             void *start1 = x;
+             size_t  len1 = pagesize/2;
+             /* one on the same page */
+             void *start2 = (void*)((uintptr_t)x+len1);
+             size_t  len2 = pagesize/4;
+             /* one extending into the third page */
+             void *start3 = (void*)((uintptr_t)x+len1+len2);
+             size_t  len3 = 2*pagesize;
+
+             cheat_assert(MSTRO_OK==mstro_memlock(start1, len1));
+             cheat_assert(MSTRO_OK==mstro_memlock(start2, len2));
+             cheat_assert(MSTRO_OK==mstro_memlock(start3, len3));
+
+             cheat_assert(MSTRO_OK==mstro_memunlock(start2, len2));
+             cheat_assert(MSTRO_OK==mstro_memunlock(start1, len1));
+             cheat_assert(MSTRO_OK==mstro_memunlock(start3, len3));
+
+             free(x);
+             cheat_assert(MSTRO_OK==mstro_memlock_finalize());
+           })
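+
+
+/* An additional hedged sketch (not part of the original test set): two
+ * distinct allocations carved out of one malloc()ed region share a page;
+ * this exercises the per-page refcounting, checking that unlocking one
+ * allocation does not make unlocking the other fail. The names
+ * start_a/start_b and the sizes are illustrative only. */
+CHEAT_TEST(lock_shared_page,
+           {
+             long pagesize = sysconf(_SC_PAGESIZE);
+             void *x = malloc(pagesize);
+             cheat_assert(x!=NULL);
+
+             cheat_assert(MSTRO_OK==mstro_memlock_init(2*pagesize));
+
+             /* two allocations landing on the same page(s) */
+             void *start_a = x;
+             size_t  len_a = pagesize/4;
+             void *start_b = (void*)((uintptr_t)x + len_a);
+             size_t  len_b = pagesize/4;
+
+             cheat_assert(MSTRO_OK==mstro_memlock(start_a, len_a));
+             cheat_assert(MSTRO_OK==mstro_memlock(start_b, len_b));
+
+             /* unlocking one allocation must not invalidate the other */
+             cheat_assert(MSTRO_OK==mstro_memunlock(start_a, len_a));
+             cheat_assert(MSTRO_OK==mstro_memunlock(start_b, len_b));
+
+             free(x);
+             cheat_assert(MSTRO_OK==mstro_memlock_finalize());
+           })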