path: root/ldso/ldso/dl-elf.c
author    Eric Andersen <andersen@codepoet.org>    2004-07-14 12:27:03 +0000
committer Eric Andersen <andersen@codepoet.org>    2004-07-14 12:27:03 +0000
commit    b998ae0611fc9abb6b093209a6c67aeb833e46ad (patch)
tree      a9be4d01684a45f9b56e8585b695e8aea3c2645f /ldso/ldso/dl-elf.c
parent    e79b93975e4a949e77e372083b86f1093f6de29c (diff)
Based on a patch from Alexandre Oliva, make sure _dl_malloc returns a nicely
aligned pointer that may be aligned up to page_size. Also add _dl_free,
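For illustration only (not part of the commit), here is a minimal standalone sketch of the alignment idiom the patch introduces: bump-allocator pointers are rounded up to the strictest alignment of a union of basic types. The union mirrors the __align_type union added in the diff below; the names align_demo, align_up and the demo main are hypothetical.

/* Standalone sketch, not uClibc code: align a pointer the way the patched
 * _dl_malloc does, using the alignment of a union of basic types. */
#include <stdio.h>

union align_demo {
	void *p;
	void (*fp)(void);
	long long ll;
	double d;
};

static unsigned char *align_up(unsigned char *addr)
{
	/* __alignof__ is the GCC extension the patch itself relies on. */
	unsigned long a = __alignof__(union align_demo);
	return (unsigned char *)(((unsigned long)addr + a - 1) & ~(a - 1));
}

int main(void)
{
	static unsigned char pool[64];
	/* A misaligned bump pointer is rounded up to the union's alignment. */
	printf("%p -> %p\n", (void *)(pool + 1), (void *)align_up(pool + 1));
	return 0;
}

The mask form (addr + a - 1) & ~(a - 1) relies on the alignment being a power of two, which holds for all the members above.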
Diffstat (limited to 'ldso/ldso/dl-elf.c')
-rw-r--r--  ldso/ldso/dl-elf.c | 52
1 file changed, 44 insertions(+), 8 deletions(-)
diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c
index d27e4ccb3..7f0f76c35 100644
--- a/ldso/ldso/dl-elf.c
+++ b/ldso/ldso/dl-elf.c
@@ -31,6 +31,7 @@
#include "ldso.h"
+void *(*_dl_malloc_function) (size_t size) = NULL;
#ifdef USE_CACHE
@@ -405,7 +406,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
tpnt = _dl_check_hashed_files(libname);
if (tpnt) {
if (*rpnt) {
- (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
+ (*rpnt)->next = (struct dyn_elf *) _dl_malloc_function(sizeof(struct dyn_elf));
_dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
(*rpnt)->next->prev = (*rpnt);
*rpnt = (*rpnt)->next;
@@ -691,7 +692,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
* Add this object into the symbol chain
*/
if (*rpnt) {
- (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
+ (*rpnt)->next = (struct dyn_elf *) _dl_malloc_function(sizeof(struct dyn_elf));
_dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
(*rpnt)->next->prev = (*rpnt);
*rpnt = (*rpnt)->next;
@@ -883,12 +884,21 @@ char *_dl_strdup(const char *string)
int len;
len = _dl_strlen(string);
- retval = _dl_malloc(len + 1);
+ retval = _dl_malloc_function(len + 1);
_dl_strcpy(retval, string);
return retval;
}
-void *(*_dl_malloc_function) (size_t size) = NULL;
+union __align_type
+{
+ void *p;
+ void (*fp)(void);
+ long long ll;
+#if defined __UCLIBC_HAS_FLOATS__ && ! defined __UCLIBC_HAS_SOFT_FLOAT__
+ double d;
+#endif
+};
+
void *_dl_malloc(int size)
{
void *retval;
@@ -902,11 +912,29 @@ void *_dl_malloc(int size)
if (_dl_malloc_function)
return (*_dl_malloc_function) (size);
- if (_dl_malloc_addr - _dl_mmap_zero + size > _dl_pagesize) {
+ if ((int)(_dl_malloc_addr - _dl_mmap_zero + size) > (int)_dl_pagesize) {
+ int rounded_size;
+
+ /* Since the above assumes we get a full page even if
+ we request less than that, make sure we request a
+ full page, since uClinux may give us less than
+ a full page. We might round even
+ larger-than-a-page sizes, but we end up never
+ reusing _dl_mmap_zero/_dl_malloc_addr in that case,
+ so we don't do it.
+
+ The actual page size doesn't really matter; as long
+ as we're self-consistent here, we're safe. */
+ if (size < (int)_dl_pagesize)
+ rounded_size = (size + _dl_pagesize - 1) & _dl_pagesize;
+ else
+ rounded_size = size;
+
+
#ifdef __SUPPORT_LD_DEBUG_EARLY__
_dl_dprintf(2, "malloc: mmapping more memory\n");
#endif
- _dl_mmap_zero = _dl_malloc_addr = _dl_mmap((void *) 0, size,
+ _dl_mmap_zero = _dl_malloc_addr = _dl_mmap((void *) 0, rounded_size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (_dl_mmap_check_error(_dl_mmap_zero)) {
_dl_dprintf(2, "%s: mmap of a spare page failed!\n", _dl_progname);
@@ -920,8 +948,16 @@ void *_dl_malloc(int size)
* Align memory to 4 byte boundary. Some platforms require this, others
* simply get better performance.
*/
- _dl_malloc_addr = (unsigned char *) (((unsigned long) _dl_malloc_addr + 3) & ~(3));
+ _dl_malloc_addr = (unsigned char *)
+ (((unsigned long) _dl_malloc_addr +
+ __alignof__(union __align_type) - 1)
+ & ~(__alignof__(union __align_type) - 1));
return retval;
}
-
+void (*_dl_free_function) (void *p) = NULL;
+
+void _dl_free(void *p)
+{
+	if (_dl_free_function)
+		(*_dl_free_function) (p);
+}
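For context, a hedged user-space sketch (not uClibc source; every name below is a hypothetical stand-in) of the allocator shape _dl_malloc and _dl_free take after this patch: consult an installable hook first, otherwise carve from an anonymous mmap'd page, rounding sub-page requests up to a full page and re-aligning the bump pointer after each allocation. It assumes a POSIX mmap and a 4 KiB page; the round-up mask ~(page_size - 1) is the conventional form and, for sizes below one page, yields exactly one page, the same effect the patch's expression has under its size < _dl_pagesize guard.

/* Hedged sketch, not uClibc code: hook first, then a page-backed bump
 * allocator whose returned pointers stay aligned for any basic type. */
#include <stddef.h>
#include <sys/mman.h>

union align_type { void *p; void (*fp)(void); long long ll; double d; };

void *(*malloc_hook)(size_t) = NULL;   /* stand-in for _dl_malloc_function */
void (*free_hook)(void *) = NULL;      /* stand-in for _dl_free_function */

static unsigned char *heap_base, *heap_ptr;
static const size_t page_size = 4096;  /* assume 4 KiB pages for the sketch */

void *bump_malloc(size_t size)
{
	void *retval;

	if (malloc_hook)
		return malloc_hook(size);

	if (heap_base == NULL || (size_t)(heap_ptr - heap_base) + size > page_size) {
		/* Sub-page requests become exactly one page; larger requests
		 * get a mapping of their own size and are never reused. */
		size_t rounded = size < page_size
			? (size + page_size - 1) & ~(page_size - 1)
			: size;
		void *m = mmap(NULL, rounded, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (m == MAP_FAILED)
			return NULL;
		heap_base = heap_ptr = m;
	}

	retval = heap_ptr;
	heap_ptr += size;
	/* Keep the next allocation aligned to the strictest union member. */
	heap_ptr = (unsigned char *)(((unsigned long)heap_ptr
			+ __alignof__(union align_type) - 1)
			& ~(__alignof__(union align_type) - 1));
	return retval;
}

void bump_free(void *p)
{
	if (free_hook)
		free_hook(p);
	/* Otherwise a no-op, like _dl_free: page-carved memory is not reclaimed. */
}

Nothing is ever unmapped here; the hook pointers exist so that, once a full malloc implementation is available, the loader can hand allocation and freeing over to it instead of growing its private pages.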