author    Waldemar Brodkorb <wbx@openadk.org>  2015-02-28 11:25:36 +0100
committer Waldemar Brodkorb <wbx@openadk.org>  2015-03-01 13:51:51 +0100
commit    a9c484fd58d0535dda5e5a292dab120f83611b80 (patch)
tree      a7deff7e338f9a290bec40f4e97d39aa4f4fbaed /package/lvm/patches/patch-lib_mm_memlock_c
parent    bf401a7ecf628c21d7eeb0edb32401a648afd93d (diff)
Revert this upstream LVM2 change, as it breaks with uClibc-ng:
https://www.redhat.com/archives/lvm-devel/2014-November/msg00132.html

uClibc-ng uses the simpler malloc implementation as its default.
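For context, here is a minimal standalone sketch (not code from LVM2 or from this patch, and assuming glibc's <malloc.h>) of the heuristic the removed hunk relies on: mallinfo() is sampled before and after an allocation, and a growing hblks count signals that malloc satisfied the request with a direct mmap(). An allocator that does not provide this glibc-specific accounting, such as uClibc-ng's simpler default malloc mentioned above, does not support that scheme, which is what this revert works around.

/*
 * Standalone illustration only -- not part of LVM2 or of this patch.
 * Assumes glibc, where struct mallinfo carries hblks, the number of
 * blocks currently allocated via mmap().
 */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct mallinfo before = mallinfo();

	/* A large request that glibc will typically serve with mmap(). */
	void *p = malloc(8 * 1024 * 1024);
	struct mallinfo after = mallinfo();

	if (p && after.hblks > before.hblks)
		printf("request was mmap-backed (hblks %d -> %d)\n",
		       before.hblks, after.hblks);

	free(p);
	return 0;
}

The reverted LVM2 code used this kind of hblks comparison to decide whether to retry with more, smaller areas; dropping it leaves a single plain malloc() of _size_malloc_tmp, which behaves the same on any allocator.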
Diffstat (limited to 'package/lvm/patches/patch-lib_mm_memlock_c')
-rw-r--r--  package/lvm/patches/patch-lib_mm_memlock_c | 76
1 file changed, 76 insertions(+), 0 deletions(-)
diff --git a/package/lvm/patches/patch-lib_mm_memlock_c b/package/lvm/patches/patch-lib_mm_memlock_c
new file mode 100644
index 000000000..8cc6d8514
--- /dev/null
+++ b/package/lvm/patches/patch-lib_mm_memlock_c
@@ -0,0 +1,76 @@
+--- LVM2.2.02.114.orig/lib/mm/memlock.c 2014-11-29 00:07:42.000000000 +0100
++++ LVM2.2.02.114/lib/mm/memlock.c 2015-02-28 09:44:51.000000000 +0100
+@@ -25,7 +25,6 @@
+ #include <sys/mman.h>
+ #include <sys/time.h>
+ #include <sys/resource.h>
+-#include <malloc.h>
+
+ #ifndef DEVMAPPER_SUPPORT
+
+@@ -134,10 +133,8 @@ static void _touch_memory(void *mem, siz
+ static void _allocate_memory(void)
+ {
+ #ifndef VALGRIND_POOL
+- void *stack_mem;
++ void *stack_mem, *temp_malloc_mem;
+ struct rlimit limit;
+- int i, area = 0, missing = _size_malloc_tmp, max_areas = 32, hblks;
+- char *areas[max_areas];
+
+ /* Check if we could preallocate requested stack */
+ if ((getrlimit (RLIMIT_STACK, &limit) == 0) &&
+@@ -146,50 +143,13 @@ static void _allocate_memory(void)
+ _touch_memory(stack_mem, _size_stack);
+ /* FIXME else warn user setting got ignored */
+
+- /*
+- * When a brk() fails due to fragmented address space (which sometimes
+- * happens when we try to grab 8M or so), glibc will make a new
+- * arena. In this arena, the rules for using “direct” mmap are relaxed,
+- * circumventing the MAX_MMAPs and MMAP_THRESHOLD settings. We can,
+- * however, detect when this happens with mallinfo() and try to co-opt
+- * malloc into using MMAP as a MORECORE substitute instead of returning
+- * MMAP'd memory directly. Since MMAP-as-MORECORE does not munmap the
+- * memory on free(), this is good enough for our purposes.
+- */
+- while (missing > 0) {
+- struct mallinfo inf = mallinfo();
+- hblks = inf.hblks;
+-
+- if ((areas[area] = malloc(_size_malloc_tmp)))
+- _touch_memory(areas[area], _size_malloc_tmp);
+-
+- inf = mallinfo();
+-
+- if (hblks < inf.hblks) {
+- /* malloc cheated and used mmap, even though we told it
+- not to; we try with twice as many areas, each half
+- the size, to circumvent the faulty logic in glibc */
+- free(areas[area]);
+- _size_malloc_tmp /= 2;
+- } else {
+- ++ area;
+- missing -= _size_malloc_tmp;
+- }
+-
+- if (area == max_areas && missing > 0) {
+- /* Too bad. Warn the user and proceed, as things are
+- * most likely going to work out anyway. */
+- log_warn("WARNING: Failed to reserve memory, %d bytes missing.", missing);
+- break;
+- }
+- }
++ if ((temp_malloc_mem = malloc(_size_malloc_tmp)))
++ _touch_memory(temp_malloc_mem, _size_malloc_tmp);
+
+ if ((_malloc_mem = malloc(_size_malloc)))
+ _touch_memory(_malloc_mem, _size_malloc);
+
+- /* free up the reserves so subsequent malloc's can use that memory */
+- for (i = 0; i < area; ++i)
+- free(areas[i]);
++ free(temp_malloc_mem);
+ #endif
+ }
+