author | Carmelo Amoroso <carmelo.amoroso@st.com> | 2011-05-04 08:31:16 +0200
committer | Carmelo Amoroso <carmelo.amoroso@st.com> | 2011-05-04 08:31:16 +0200
commit | 3004ce0c9619f89bf8e64931edd696bf4df8d2e1 (patch)
tree | f03f898fa5c2506c4e30f5f89ce097acf01bc016 /ldso
parent | 3b3285b1b7c02d36c74a6ae265fdb02ca991c96b (diff)
parent | 4916fd889ec1c60710faa528a3ccdb50973198e2 (diff)
Merge remote-tracking branch 'origin/master' into prelink
* origin/master: (32 commits)
libubacktrace: fix backtrace support on arm-eabi, which needs libgcc_eh linked too
getaddrinfo.c: fix incorrect check for ERANGE from gethostbyaddr_r
getaddrinfo.c: improve code readability. No functional changes
string: remove unused variable
x86_64: silence warning if !TLS
buildsys: prettify ssp.c handling
madvise is LINUX_SPECIFIC
test_nptl: fix expected result for tst-cputimer[123]
test_nptl: fix expected result for tst-clock2 test
buildsys: make $(LOCAL_INSTALL_PATH) phony
ether_aton: reject invalid input
tests: disable ether tests if !HAS_SOCKET
inet: add ether_aton testcase
sysconf: clock_getres depends on HAS_REALTIME
__rt_sigwaitinfo: depends on HAS_REALTIME
buildsys: minor fixes in Makefile.arch for C6X
buildsys: minor fixes in Makefile.arch for microblaze
libubacktrace: enabled for all archs indeed.
sparc: don't access fp registers when configured for no fpu
libubacktrace: generic implementation based dwarf
...
Conflicts:
ldso/ldso/dl-elf.c
ldso/ldso/mips/elfinterp.c
ldso/ldso/x86_64/elfinterp.c
Signed-off-by: Carmelo Amoroso <carmelo.amoroso@st.com>
Diffstat (limited to 'ldso')
-rw-r--r-- | ldso/include/dl-elf.h | 2
-rw-r--r-- | ldso/include/ldso.h | 2
-rw-r--r-- | ldso/ldso/bfin/dl-inlines.h | 41
-rw-r--r-- | ldso/ldso/bfin/dl-sysdep.h | 2
-rw-r--r-- | ldso/ldso/c6x/dl-inlines.h | 29
-rw-r--r-- | ldso/ldso/c6x/dl-sysdep.h | 3
-rw-r--r-- | ldso/ldso/c6x/elfinterp.c | 6
-rw-r--r-- | ldso/ldso/dl-elf.c | 318
-rw-r--r-- | ldso/ldso/dl-startup.c | 16
-rw-r--r-- | ldso/ldso/frv/dl-inlines.h | 41
-rw-r--r-- | ldso/ldso/frv/dl-sysdep.h | 2
-rw-r--r-- | ldso/ldso/mips/elfinterp.c | 5
-rw-r--r-- | ldso/ldso/x86_64/elfinterp.c | 8
13 files changed, 301 insertions, 174 deletions
diff --git a/ldso/include/dl-elf.h b/ldso/include/dl-elf.h index 908cb63ce..495bd2bca 100644 --- a/ldso/include/dl-elf.h +++ b/ldso/include/dl-elf.h @@ -222,7 +222,7 @@ unsigned int __dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info ADJUST_DYN_INFO(DT_DSBT_BASE_IDX, load_off); /* Initialize loadmap dsbt info. */ - load_off.map->dsbt_table = dynamic_info[DT_DSBT_BASE_IDX]; + load_off.map->dsbt_table = (void *)dynamic_info[DT_DSBT_BASE_IDX]; load_off.map->dsbt_size = dynamic_info[DT_DSBT_SIZE_IDX]; load_off.map->dsbt_index = dynamic_info[DT_DSBT_INDEX_IDX]; #endif diff --git a/ldso/include/ldso.h b/ldso/include/ldso.h index 9aa610e7b..3152b744a 100644 --- a/ldso/include/ldso.h +++ b/ldso/include/ldso.h @@ -35,6 +35,8 @@ #include <sys/types.h> /* Pull in the arch specific page size */ #include <bits/uClibc_page.h> +/* Pull in the MIN macro */ +#include <sys/param.h> /* Pull in the ldso syscalls and string functions */ #ifndef __ARCH_HAS_NO_SHARED__ #include <dl-syscall.h> diff --git a/ldso/ldso/bfin/dl-inlines.h b/ldso/ldso/bfin/dl-inlines.h index 6524f5edc..969986218 100644 --- a/ldso/ldso/bfin/dl-inlines.h +++ b/ldso/ldso/bfin/dl-inlines.h @@ -88,14 +88,39 @@ __dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, segdata->p_memsz = phdr->p_memsz; #if defined (__SUPPORT_LD_DEBUG__) - { - extern char *_dl_debug; - extern int _dl_debug_file; - if (_dl_debug) - _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", - loadaddr.map->nsegs-1, - segdata->p_vaddr, segdata->addr, segdata->p_memsz); - } + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, segdata->p_memsz); +#endif +} + +/* Replace an existing entry in the load map. */ +static __always_inline void +__dl_update_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr) +{ + struct elf32_fdpic_loadseg *segdata; + void *oldaddr; + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr + && loadaddr.map->segs[i].p_memsz == phdr->p_memsz) + break; + if (i == loadaddr.map->nsegs) + _dl_exit (-1); + + segdata = loadaddr.map->segs + i; + oldaddr = (void *)segdata->addr; + _dl_munmap (oldaddr, segdata->p_memsz); + segdata->addr = (Elf32_Addr) addr; + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz); #endif } diff --git a/ldso/ldso/bfin/dl-sysdep.h b/ldso/ldso/bfin/dl-sysdep.h index 50c750990..168e5c89a 100644 --- a/ldso/ldso/bfin/dl-sysdep.h +++ b/ldso/ldso/bfin/dl-sysdep.h @@ -120,6 +120,8 @@ struct funcdesc_ht; #define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ dl_init_loadaddr_load_count)) +#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR))) #define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ (__dl_loadaddr_unmap ((LOADADDR), (NULL))) #define DL_LIB_UNMAP(LIB, LEN) \ diff --git a/ldso/ldso/c6x/dl-inlines.h b/ldso/ldso/c6x/dl-inlines.h index d8fb42c55..62e1cc9ca 100644 --- a/ldso/ldso/c6x/dl-inlines.h +++ b/ldso/ldso/c6x/dl-inlines.h @@ -74,6 +74,35 @@ __dl_init_loadaddr_hdr (struct elf32_dsbt_loadaddr loadaddr, void *addr, #endif } +/* Replace an existing entry in the load map. 
*/ +static __always_inline void +__dl_update_loadaddr_hdr (struct elf32_dsbt_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr) +{ + struct elf32_dsbt_loadseg *segdata; + void *oldaddr; + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr + && loadaddr.map->segs[i].p_memsz == phdr->p_memsz) + break; + if (i == loadaddr.map->nsegs) + _dl_exit (-1); + + segdata = loadaddr.map->segs + i; + oldaddr = (void *)segdata->addr; + _dl_munmap (oldaddr, segdata->p_memsz); + segdata->addr = (Elf32_Addr) addr; + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz); +#endif +} + static __always_inline void __dl_loadaddr_unmap (struct elf32_dsbt_loadaddr loadaddr) { diff --git a/ldso/ldso/c6x/dl-sysdep.h b/ldso/ldso/c6x/dl-sysdep.h index 8f1b122d3..ff7accdf1 100644 --- a/ldso/ldso/c6x/dl-sysdep.h +++ b/ldso/ldso/c6x/dl-sysdep.h @@ -104,6 +104,9 @@ struct elf32_dsbt_loadaddr; (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ dl_init_loadaddr_load_count)) +#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR))) + #define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ (__dl_loadaddr_unmap ((LOADADDR))) diff --git a/ldso/ldso/c6x/elfinterp.c b/ldso/ldso/c6x/elfinterp.c index 7c79171ce..f5d3ad41e 100644 --- a/ldso/ldso/c6x/elfinterp.c +++ b/ldso/ldso/c6x/elfinterp.c @@ -198,6 +198,10 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, new_val = sym_val; *reloc_addr = sym_val; break; + case R_C6000_DSBT_INDEX: + new_val = (old_val & ~0x007fff00) | ((tpnt->loadaddr.map->dsbt_index & 0x7fff) << 8); + *reloc_addr = new_val; + break; case R_C6000_ABS_L16: new_val = (old_val & ~0x007fff80) | ((sym_val & 0xffff) << 7); *reloc_addr = new_val; @@ -224,7 +228,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, (char *)symbol_addr, symtab[symtab_index].st_size); } - break; + return 0; default: return -1; /*call _dl_exit(1) */ } diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c index a881b7df7..9e1415b83 100644 --- a/ldso/ldso/dl-elf.c +++ b/ldso/ldso/dl-elf.c @@ -314,6 +314,121 @@ goof: return NULL; } +/* + * Make a writeable mapping of a segment, regardless of whether PF_W is + * set or not. + */ +static void * +map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags, + unsigned long libaddr) +{ + int prot_flags = ppnt->p_flags | PF_W; + char *status, *retval; + char *tryaddr; + ssize_t size; + unsigned long map_size; + char *cpnt; + char *piclib2map = NULL; + + if (piclib == 2 && + /* We might be able to avoid this call if memsz doesn't + require an additional page, but this would require mmap + to always return page-aligned addresses and a whole + number of pages allocated. Unfortunately on uClinux + may return misaligned addresses and may allocate + partial pages, so we may end up doing unnecessary mmap + calls. + + This is what we could do if we knew mmap would always + return aligned pages: + + ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) & + PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz) + + Instead, we have to do this: */ + ppnt->p_filesz < ppnt->p_memsz) + { + piclib2map = (char *) + _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz, + LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0); + if (_dl_mmap_check_error(piclib2map)) + return 0; + } + + tryaddr = piclib == 2 ? 
piclib2map + : ((char*) (piclib ? libaddr : 0) + + (ppnt->p_vaddr & PAGE_ALIGN)); + + size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz; + + /* For !MMU, mmap to fixed address will fail. + So instead of desperately call mmap and fail, + we set status to MAP_FAILED to save a call + to mmap (). */ +#ifndef __ARCH_USE_MMU__ + if (piclib2map == 0) +#endif + status = (char *) _dl_mmap + (tryaddr, size, LXFLAGS(prot_flags), + flags | (piclib2map ? MAP_FIXED : 0), + infile, ppnt->p_offset & OFFS_ALIGN); +#ifndef __ARCH_USE_MMU__ + else + status = MAP_FAILED; +#endif +#ifdef _DL_PREAD + if (_dl_mmap_check_error(status) && piclib2map + && (_DL_PREAD (infile, tryaddr, size, + ppnt->p_offset & OFFS_ALIGN) == size)) + status = tryaddr; +#endif + if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status)) + return 0; + + if (piclib2map) + retval = piclib2map; + else + retval = status; + + /* Now we want to allocate and zero-out any data from the end + of the region we mapped in from the file (filesz) to the + end of the loadable segment (memsz). We may need + additional pages for memsz, that we map in below, and we + can count on the kernel to zero them out, but we have to + zero out stuff in the last page that we mapped in from the + file. However, we can't assume to have actually obtained + full pages from the kernel, since we didn't ask for them, + and uClibc may not give us full pages for small + allocations. So only zero out up to memsz or the end of + the page, whichever comes first. */ + + /* CPNT is the beginning of the memsz portion not backed by + filesz. */ + cpnt = (char *) (status + size); + + /* MAP_SIZE is the address of the + beginning of the next page. */ + map_size = (ppnt->p_vaddr + ppnt->p_filesz + + ADDR_ALIGN) & PAGE_ALIGN; + + _dl_memset (cpnt, 0, + MIN (map_size + - (ppnt->p_vaddr + + ppnt->p_filesz), + ppnt->p_memsz + - ppnt->p_filesz)); + + if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) { + tryaddr = map_size + (char*)(piclib ? libaddr : 0); + status = (char *) _dl_mmap(tryaddr, + ppnt->p_vaddr + ppnt->p_memsz - map_size, + LXFLAGS(prot_flags), + flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + if (_dl_mmap_check_error(status) || tryaddr != status) + return NULL; + } + return retval; +} /* * Read one ELF library into memory, mmap it into the correct locations and @@ -482,6 +597,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma), maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0); if (_dl_mmap_check_error(status)) { + cant_map: _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname); _dl_internal_error_number = LD_ERROR_MMAP_FAILED; _dl_close(infile); @@ -502,8 +618,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, char *addr; addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags); - if (addr == NULL) + if (addr == NULL) { + cant_map1: + DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma); goto cant_map; + } DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt); ppnt++; @@ -518,141 +637,9 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, ssize_t size; if (ppnt->p_flags & PF_W) { - unsigned long map_size; - char *cpnt; - char *piclib2map = 0; - - if (piclib == 2 && - /* We might be able to avoid this - call if memsz doesn't require - an additional page, but this - would require mmap to always - return page-aligned addresses - and a whole number of pages - allocated. 
Unfortunately on - uClinux may return misaligned - addresses and may allocate - partial pages, so we may end up - doing unnecessary mmap calls. - - This is what we could do if we - knew mmap would always return - aligned pages: - - ((ppnt->p_vaddr + ppnt->p_filesz - + ADDR_ALIGN) - & PAGE_ALIGN) - < ppnt->p_vaddr + ppnt->p_memsz) - - Instead, we have to do this: */ - ppnt->p_filesz < ppnt->p_memsz) - { - piclib2map = (char *) - _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) - + ppnt->p_memsz, - LXFLAGS(ppnt->p_flags), - flags | MAP_ANONYMOUS, -1, 0); - if (_dl_mmap_check_error(piclib2map)) - goto cant_map; - DL_INIT_LOADADDR_HDR - (lib_loadaddr, piclib2map - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); - } - - tryaddr = piclib == 2 ? piclib2map - : ((char*) (piclib ? libaddr : lib_loadaddr) + - (ppnt->p_vaddr & PAGE_ALIGN)); - - size = (ppnt->p_vaddr & ADDR_ALIGN) - + ppnt->p_filesz; - - /* For !MMU, mmap to fixed address will fail. - So instead of desperately call mmap and fail, - we set status to MAP_FAILED to save a call - to mmap (). */ -#ifndef __ARCH_USE_MMU__ - if (piclib2map == 0) -#endif - status = (char *) _dl_mmap - (tryaddr, size, LXFLAGS(ppnt->p_flags), - flags | (piclib2map ? MAP_FIXED : 0), - infile, ppnt->p_offset & OFFS_ALIGN); -#ifndef __ARCH_USE_MMU__ - else - status = MAP_FAILED; -#endif -#ifdef _DL_PREAD - if (_dl_mmap_check_error(status) && piclib2map - && (_DL_PREAD (infile, tryaddr, size, - ppnt->p_offset & OFFS_ALIGN) - == size)) - status = tryaddr; -#endif - if (_dl_mmap_check_error(status) - || (tryaddr && tryaddr != status)) { - cant_map: - _dl_dprintf(2, "%s:%i: can't map '%s'\n", - _dl_progname, __LINE__, libname); - _dl_internal_error_number = LD_ERROR_MMAP_FAILED; - DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma); - _dl_close(infile); - _dl_munmap(header, _dl_pagesize); - return NULL; - } - - if (! piclib2map) { - DL_INIT_LOADADDR_HDR - (lib_loadaddr, status - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); - } - /* Now we want to allocate and - zero-out any data from the end of - the region we mapped in from the - file (filesz) to the end of the - loadable segment (memsz). We may - need additional pages for memsz, - that we map in below, and we can - count on the kernel to zero them - out, but we have to zero out stuff - in the last page that we mapped in - from the file. However, we can't - assume to have actually obtained - full pages from the kernel, since - we didn't ask for them, and uClibc - may not give us full pages for - small allocations. So only zero - out up to memsz or the end of the - page, whichever comes first. */ - - /* CPNT is the beginning of the memsz - portion not backed by filesz. */ - cpnt = (char *) (status + size); - - /* MAP_SIZE is the address of the - beginning of the next page. */ - map_size = (ppnt->p_vaddr + ppnt->p_filesz - + ADDR_ALIGN) & PAGE_ALIGN; - -#ifndef MIN -# define MIN(a,b) ((a) < (b) ? (a) : (b)) -#endif - _dl_memset (cpnt, 0, - MIN (map_size - - (ppnt->p_vaddr - + ppnt->p_filesz), - ppnt->p_memsz - - ppnt->p_filesz)); - - if (map_size < ppnt->p_vaddr + ppnt->p_memsz - && !piclib2map) { - tryaddr = map_size + (char*)(piclib ? libaddr : lib_loadaddr); - status = (char *) _dl_mmap(tryaddr, - ppnt->p_vaddr + ppnt->p_memsz - map_size, - LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0); - if (_dl_mmap_check_error(status) - || tryaddr != status) - goto cant_map; - } + status = map_writeable (infile, ppnt, piclib, flags, libaddr); + if (status == NULL) + goto cant_map1; } else { tryaddr = (piclib == 2 ? 
0 : (char *) (ppnt->p_vaddr & PAGE_ALIGN) @@ -665,11 +652,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, infile, ppnt->p_offset & OFFS_ALIGN); if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status)) - goto cant_map; - DL_INIT_LOADADDR_HDR - (lib_loadaddr, status - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); + goto cant_map1; } + DL_INIT_LOADADDR_HDR(lib_loadaddr, + status + (ppnt->p_vaddr & ADDR_ALIGN), + ppnt); /* if (libaddr == 0 && piclib) { libaddr = (unsigned long) status; @@ -678,7 +665,6 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, } ppnt++; } - _dl_close(infile); /* * The dynamic_addr must be take into acount lib_loadaddr value, to note @@ -700,6 +686,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n", _dl_progname, libname); _dl_munmap(header, _dl_pagesize); + _dl_close(infile); return NULL; } @@ -715,10 +702,23 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff]; for (i = 0; i < epnt->e_phnum; i++, ppnt++) { if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) { +#ifdef __ARCH_USE_MMU__ _dl_mprotect((void *) ((piclib ? libaddr : lib_loadaddr) + (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz, PROT_READ | PROT_WRITE | PROT_EXEC); +#else + void *new_addr; + new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr); + if (!new_addr) { + _dl_dprintf(_dl_debug_file, "Can't modify %s's text section.", + libname); + _dl_exit(1); + } + DL_UPDATE_LOADADDR_HDR(lib_loadaddr, + new_addr + (ppnt->p_vaddr & ADDR_ALIGN), + ppnt); +#endif } } #else @@ -729,6 +729,8 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, #endif } + _dl_close(infile); + tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info, dynamic_addr, 0); tpnt->mapaddr = libaddr; @@ -829,20 +831,44 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, #ifdef __DSBT__ /* Handle DSBT initialization */ { - struct elf_resolve *t, *ref = NULL; + struct elf_resolve *t, *ref; int idx = tpnt->loadaddr.map->dsbt_index; unsigned *dsbt = tpnt->loadaddr.map->dsbt_table; if (idx == 0) { - /* This DSO has not been assigned an index */ - _dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n", - _dl_progname, libname); - _dl_exit(1); + if (!dynamic_info[DT_TEXTREL]) { + /* This DSO has not been assigned an index. */ + _dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n", + _dl_progname, libname); + _dl_exit(1); + } + /* Find a dsbt table from another module. */ + ref = NULL; + for (t = _dl_loaded_modules; t; t = t->next) { + if (ref == NULL && t != tpnt) { + ref = t; + break; + } + } + idx = tpnt->loadaddr.map->dsbt_size; + while (idx-- > 0) + if (!ref || ref->loadaddr.map->dsbt_table[idx] == NULL) + break; + if (idx <= 0) { + _dl_dprintf(2, "%s: '%s' caused DSBT table overflow!\n", + _dl_progname, libname); + _dl_exit(1); + } + _dl_if_debug_dprint("\n\tfile='%s'; assigned index %d\n", + libname, idx); + tpnt->loadaddr.map->dsbt_index = idx; + } /* * Setup dsbt slot for this module in dsbt of all modules. 
*/ + ref = NULL; for (t = _dl_loaded_modules; t; t = t->next) { /* find a dsbt table from another module */ if (ref == NULL && t != tpnt) { diff --git a/ldso/ldso/dl-startup.c b/ldso/ldso/dl-startup.c index 4799846ee..fa7972d90 100644 --- a/ldso/ldso/dl-startup.c +++ b/ldso/ldso/dl-startup.c @@ -32,8 +32,8 @@ /* * The main trick with this program is that initially, we ourselves are not - * dynamicly linked. This means that we cannot access any global variables or - * call any functions. No globals initially, since the Global Offset Table + * dynamically linked. This means that we cannot access any global variables + * or call any functions. No globals initially, since the Global Offset Table * (GOT) is initialized by the linker assuming a virtual address of 0, and no * function calls initially since the Procedure Linkage Table (PLT) is not yet * initialized. @@ -55,12 +55,12 @@ * * Fortunately, the linker itself leaves a few clues lying around, and when the * kernel starts the image, there are a few further clues. First of all, there - * is Auxiliary Vector Table information sitting on which is provided to us by - * the kernel, and which includes information about the load address that the - * program interpreter was loaded at, the number of sections, the address the - * application was loaded at and so forth. Here this information is stored in - * the array auxvt. For details see linux/fs/binfmt_elf.c where it calls - * NEW_AUX_ENT() a bunch of time.... + * is Auxiliary Vector Table information sitting on the stack which is provided + * to us by the kernel, and which includes information about the address + * that the program interpreter was loaded at, the number of sections, the + * address the application was loaded at, and so forth. Here this information + * is stored in the array auxvt. For details see linux/fs/binfmt_elf.c where + * it calls NEW_AUX_ENT() a bunch of times.... * * Next, we need to find the GOT. On most arches there is a register pointing * to the GOT, but just in case (and for new ports) I've added some (slow) C diff --git a/ldso/ldso/frv/dl-inlines.h b/ldso/ldso/frv/dl-inlines.h index 95233a7c0..0395a7e23 100644 --- a/ldso/ldso/frv/dl-inlines.h +++ b/ldso/ldso/frv/dl-inlines.h @@ -72,14 +72,39 @@ __dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, segdata->p_memsz = phdr->p_memsz; #if defined (__SUPPORT_LD_DEBUG__) - { - extern char *_dl_debug; - extern int _dl_debug_file; - if (_dl_debug) - _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", - loadaddr.map->nsegs-1, - segdata->p_vaddr, segdata->addr, segdata->p_memsz); - } + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, segdata->p_memsz); +#endif +} + +/* Replace an existing entry in the load map. 
*/ +static __always_inline void +__dl_update_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr) +{ + struct elf32_fdpic_loadseg *segdata; + void *oldaddr; + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr + && loadaddr.map->segs[i].p_memsz == phdr->p_memsz) + break; + if (i == loadaddr.map->nsegs) + _dl_exit (-1); + + segdata = loadaddr.map->segs + i; + oldaddr = (void *)segdata->addr; + _dl_munmap (oldaddr, segdata->p_memsz); + segdata->addr = (Elf32_Addr) addr; + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz); #endif } diff --git a/ldso/ldso/frv/dl-sysdep.h b/ldso/ldso/frv/dl-sysdep.h index e9c847a69..206a66247 100644 --- a/ldso/ldso/frv/dl-sysdep.h +++ b/ldso/ldso/frv/dl-sysdep.h @@ -95,6 +95,8 @@ struct funcdesc_ht; #define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ dl_init_loadaddr_load_count)) +#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR))) #define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ (__dl_loadaddr_unmap ((LOADADDR), (NULL))) #define DL_LIB_UNMAP(LIB, LEN) \ diff --git a/ldso/ldso/mips/elfinterp.c b/ldso/ldso/mips/elfinterp.c index 3ca403609..159e59d33 100644 --- a/ldso/ldso/mips/elfinterp.c +++ b/ldso/ldso/mips/elfinterp.c @@ -381,8 +381,11 @@ void _dl_perform_mips_global_got_relocations(struct elf_resolve *tpnt, int lazy) *got_entry += (unsigned long) tpnt->loadaddr; } else { + struct symbol_ref sym_ref; + sym_ref.sym = sym; + sym_ref.tpnt = NULL; *got_entry = (unsigned long) _dl_find_hash(strtab + - sym->st_name, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); + sym->st_name, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, &sym_ref); } got_entry++; diff --git a/ldso/ldso/x86_64/elfinterp.c b/ldso/ldso/x86_64/elfinterp.c index 44e2c66d8..dd9479987 100644 --- a/ldso/ldso/x86_64/elfinterp.c +++ b/ldso/ldso/x86_64/elfinterp.c @@ -157,7 +157,9 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, int reloc_type; int symtab_index; char *symname; - struct elf_resolve *tls_tpnt = NULL; +#if defined USE_TLS && USE_TLS + struct elf_resolve *tls_tpnt; +#endif struct symbol_ref sym_ref; ElfW(Addr) *reloc_addr; ElfW(Addr) symbol_addr; @@ -189,13 +191,17 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, if (_dl_trace_prelink) _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], &sym_ref, elf_machine_type_class(reloc_type)); +#if defined USE_TLS && USE_TLS tls_tpnt = sym_ref.tpnt; +#endif } else { /* Relocs against STN_UNDEF are usually treated as using a * symbol value of zero, and using the module containing the * reloc itself. */ symbol_addr = sym_ref.sym->st_value; +#if defined USE_TLS && USE_TLS tls_tpnt = tpnt; +#endif } #if defined (__SUPPORT_LD_DEBUG__) |
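
Note on the bss-zeroing logic in the new map_writeable() helper above: the region between p_filesz and p_memsz must be cleared by hand only up to the end of the last file-backed page, since anything beyond that boundary is covered by a later anonymous (and therefore zero-filled) mmap. The following is a minimal standalone sketch of that calculation, not part of this commit; zero_tail_len(), the 4096-byte page size, and the example segment values are illustrative assumptions, and ADDR_ALIGN/PAGE_ALIGN/MIN are redefined locally to mirror the uClibc macros.

#include <stdio.h>

#define PAGE_SIZE   4096UL                /* assumed page size for the sketch */
#define ADDR_ALIGN  (PAGE_SIZE - 1)       /* offset within a page             */
#define PAGE_ALIGN  (~ADDR_ALIGN)         /* mask that rounds down to a page  */
#define MIN(a, b)   ((a) < (b) ? (a) : (b))

/* How many bytes of the last file-backed page must be zeroed so that the
 * memsz-beyond-filesz (bss) portion starts out cleared. */
static unsigned long zero_tail_len(unsigned long p_vaddr,
                                   unsigned long p_filesz,
                                   unsigned long p_memsz)
{
	/* First page boundary after the file-backed data (map_size in dl-elf.c). */
	unsigned long map_size = (p_vaddr + p_filesz + ADDR_ALIGN) & PAGE_ALIGN;

	/* Zero up to that boundary or up to p_memsz, whichever comes first. */
	return MIN(map_size - (p_vaddr + p_filesz), p_memsz - p_filesz);
}

int main(void)
{
	/* Hypothetical PT_LOAD segment: 0x100 bytes of file data at an
	 * unaligned address, followed by 0x2000 bytes of bss. */
	unsigned long vaddr = 0x10080, filesz = 0x100, memsz = 0x2100;

	printf("bytes to memset in the last file-backed page: %#lx\n",
	       zero_tail_len(vaddr, filesz, memsz));
	return 0;
}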
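
A second sketch, for the DSBT change in dl-elf.c above: a DT_TEXTREL library that arrives without a DSBT index is now assigned one by scanning an already-loaded module's DSBT from the top for an unused slot, with idx <= 0 reported as table overflow. Everything below (find_free_dsbt_slot(), reference_dsbt, the table size of 8) is an illustrative standalone mock-up under those assumptions, not uClibc code.

#include <stdio.h>

#define DSBT_SIZE 8                       /* illustrative table size */

/* Mock DSBT of an already-loaded module: NULL means the slot is free.
 * Slot 0 is never handed out, matching the idx <= 0 overflow check. */
static void *reference_dsbt[DSBT_SIZE] = {
	(void *)0x1000, (void *)0x2000, (void *)0x3000,
};

/* Scan from the top of the table for a free slot, as the loader does. */
static int find_free_dsbt_slot(void **dsbt, int size)
{
	int idx = size;

	while (idx-- > 0)
		if (dsbt[idx] == NULL)
			break;
	return idx > 0 ? idx : -1;        /* -1: DSBT table overflow */
}

int main(void)
{
	int idx = find_free_dsbt_slot(reference_dsbt, DSBT_SIZE);

	if (idx < 0)
		printf("DSBT table overflow\n");
	else
		printf("assigned DSBT index %d\n", idx);
	return 0;
}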