Diffstat (limited to 'ldso')
140 files changed, 9874 insertions, 4402 deletions
diff --git a/ldso/include/dl-defs.h b/ldso/include/dl-defs.h index 791d068bb..bedfa977e 100644 --- a/ldso/include/dl-defs.h +++ b/ldso/include/dl-defs.h @@ -5,8 +5,8 @@ * GNU Lesser General Public License version 2.1 or later. */ -#ifndef _LD_DEFS_H -#define _LD_DEFS_H +#ifndef _DL_DEFS_H +#define _DL_DEFS_H #define FLAG_ANY -1 #define FLAG_TYPE_MASK 0x00ff @@ -72,6 +72,13 @@ typedef struct { #endif +#ifdef _LIBC +#ifndef __ARCH_HAS_NO_SHARED__ +/* arch specific defines */ +#include <dl-sysdep.h> +#endif +#endif + /* Provide a means for a port to pass additional arguments to the _dl_start function. */ #ifndef DL_START @@ -99,7 +106,7 @@ typedef struct { * from DL_START, so additional arguments passed to it may be referenced. */ #ifndef DL_BOOT_COMPUTE_DYN #define DL_BOOT_COMPUTE_DYN(DPNT, GOT, LOAD_ADDR) \ - ((DPNT) = ((ElfW(Dyn) *) DL_RELOC_ADDR(load_addr, got))) + ((DPNT) = ((ElfW(Dyn) *) DL_RELOC_ADDR(LOAD_ADDR, GOT))) #endif /* Initialize the location of the global offset table. This is only called @@ -179,6 +186,14 @@ typedef struct { #define DL_LOOKUP_ADDRESS(ADDRESS) (ADDRESS) #endif +/* On some architectures dladdr can't use st_size of all symbols this way. */ +#define DL_ADDR_SYM_MATCH(SYM_ADDR, SYM, MATCHSYM, ADDR) \ + ((ADDR) >= (SYM_ADDR) \ + && ((((SYM)->st_shndx == SHN_UNDEF || (SYM)->st_size == 0) \ + && (ADDR) == (SYM_ADDR)) \ + || (ADDR) < (SYM_ADDR) + (SYM)->st_size) \ + && (!(MATCHSYM) || MATCHSYM < (SYM_ADDR))) + /* Use this macro to convert a pointer to a function's entry point to * a pointer to function. The pointer is assumed to have already been * relocated. LOADADDR is passed because it may contain additional @@ -212,7 +227,7 @@ typedef struct { _dl_find_hash for this reloc TYPE. TPNT is the module in which the matching SYM was found. */ #ifndef DL_FIND_HASH_VALUE -# define DL_FIND_HASH_VALUE(TPNT, TYPE, SYM) (DL_RELOC_ADDR ((SYM)->st_value, (TPNT)->loadaddr)) +# define DL_FIND_HASH_VALUE(TPNT, TYPE, SYM) (DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value)) #endif /* Unmap all previously-mapped segments accumulated in LOADADDR. @@ -225,7 +240,7 @@ typedef struct { /* Similar to DL_LOADADDR_UNMAP, but used for libraries that have been dlopen()ed successfully, when they're dlclose()d. */ #ifndef DL_LIB_UNMAP -# define DL_LIB_UNMAP(LIB, LEN) (DL_LOADADDR_UNMAP ((LIB)->loadaddr, (LEN))) +# define DL_LIB_UNMAP(LIB, LEN) (DL_LOADADDR_UNMAP ((LIB)->mapaddr, (LEN))) #endif /* Define this to verify that a library named LIBNAME, whose ELF @@ -251,4 +266,26 @@ typedef struct { # define DL_MAP_SEGMENT(EPNT, PPNT, INFILE, FLAGS) 0 #endif -#endif /* _LD_DEFS_H */ +/* Define this to declare the library offset. */ +#ifndef DL_DEF_LIB_OFFSET +# define DL_DEF_LIB_OFFSET static unsigned long _dl_library_offset +#endif + +/* Define this to get the library offset. */ +#ifndef DL_GET_LIB_OFFSET +# define DL_GET_LIB_OFFSET() _dl_library_offset +#endif + +/* Define this to set the library offset as difference beetwen the mapped + library address and the smallest virtual address of the first PT_LOAD + segment. */ +#ifndef DL_SET_LIB_OFFSET +# define DL_SET_LIB_OFFSET(offset) (_dl_library_offset = (offset)) +#endif + +/* Define this to get the real object's runtime address. 
*/ +#ifndef DL_GET_RUN_ADDR +# define DL_GET_RUN_ADDR(loadaddr, mapaddr) (mapaddr) +#endif + +#endif /* _DL_DEFS_H */ diff --git a/ldso/include/dl-elf.h b/ldso/include/dl-elf.h index 076678cfc..80625fd5b 100644 --- a/ldso/include/dl-elf.h +++ b/ldso/include/dl-elf.h @@ -5,18 +5,22 @@ * GNU Lesser General Public License version 2.1 or later. */ -#ifndef LINUXELF_H -#define LINUXELF_H +#ifndef _DL_ELF_H +#define _DL_ELF_H +#include <features.h> +#include <bits/wordsize.h> #include <dl-string.h> /* before elf.h to get ELF_USES_RELOCA right */ #include <elf.h> #include <link.h> +#include <dl-defs.h> +#include <dlfcn.h> -/* Forward declarations for stuff defined in ld_hash.h */ +/* Forward declarations for stuff defined in dl-hash.h */ struct dyn_elf; struct elf_resolve; +struct r_scope_elem; -#include <dl-defs.h> #ifdef __LDSO_CACHE_SUPPORT__ extern int _dl_map_cache(void); extern int _dl_unmap_cache(void); @@ -25,21 +29,18 @@ static __inline__ void _dl_map_cache(void) { } static __inline__ void _dl_unmap_cache(void) { } #endif - -/* Function prototypes for non-static stuff in readelflib1.c */ +/* Function prototypes for non-static stuff in elfinterp.c */ extern void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, unsigned long rel_addr, unsigned long rel_size); extern int _dl_parse_relocation_information(struct dyn_elf *rpnt, - unsigned long rel_addr, unsigned long rel_size); -extern struct elf_resolve * _dl_load_shared_library(int secure, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size); +extern struct elf_resolve * _dl_load_shared_library(unsigned int rflags, struct dyn_elf **rpnt, struct elf_resolve *tpnt, char *full_libname, int trace_loaded_objects); -extern struct elf_resolve * _dl_load_elf_shared_library(int secure, - struct dyn_elf **rpnt, char *libname); -extern struct elf_resolve *_dl_check_if_named_library_is_loaded(const char *full_libname, - int trace_loaded_objects); +extern struct elf_resolve * _dl_load_elf_shared_library(unsigned int rflags, + struct dyn_elf **rpnt, const char *libname); extern int _dl_linux_resolve(void); -extern int _dl_fixup(struct dyn_elf *rpnt, int flag); +extern int _dl_fixup(struct dyn_elf *rpnt, struct r_scope_elem *scope, int flag); extern void _dl_protect_relro (struct elf_resolve *l); /* @@ -84,33 +85,58 @@ extern void _dl_protect_relro (struct elf_resolve *l); #endif /* OS and/or GNU dynamic extensions */ + +#define OS_NUM_BASE 1 /* for DT_RELOCCOUNT */ + #ifdef __LDSO_GNU_HASH_SUPPORT__ -# define OS_NUM 2 /* for DT_RELOCCOUNT and DT_GNU_HASH entries */ +# define OS_NUM_GNU_HASH 1 /* for DT_GNU_HASH entry */ +#else +# define OS_NUM_GNU_HASH 0 +#endif + +#ifdef __LDSO_PRELINK_SUPPORT__ +# define OS_NUM_PRELINK 6 /* for DT_GNU_PRELINKED entry */ #else -# define OS_NUM 1 /* for DT_RELOCCOUNT entry */ +# define OS_NUM_PRELINK 0 #endif +#define OS_NUM (OS_NUM_BASE + OS_NUM_GNU_HASH + OS_NUM_PRELINK) + #ifndef ARCH_DYNAMIC_INFO /* define in arch specific code, if needed */ # define ARCH_NUM 0 #endif -#define DYNAMIC_SIZE (DT_NUM+OS_NUM+ARCH_NUM) +#define DYNAMIC_SIZE (DT_NUM + OS_NUM + ARCH_NUM) /* Keep ARCH specific entries into dynamic section at the end of the array */ #define DT_RELCONT_IDX (DYNAMIC_SIZE - OS_NUM - ARCH_NUM) #ifdef __LDSO_GNU_HASH_SUPPORT__ /* GNU hash comes just after the relocation count */ # define DT_GNU_HASH_IDX (DT_RELCONT_IDX + 1) +#else +# define DT_GNU_HASH_IDX DT_RELCONT_IDX #endif -extern void _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], - void *debug_addr, 
DL_LOADADDR_TYPE load_off); +#ifdef __LDSO_PRELINK_SUPPORT__ +/* GNU prelink comes just after the GNU hash if present */ +#define DT_GNU_PRELINKED_IDX (DT_GNU_HASH_IDX + 1) +#define DT_GNU_CONFLICT_IDX (DT_GNU_HASH_IDX + 2) +#define DT_GNU_CONFLICTSZ_IDX (DT_GNU_HASH_IDX + 3) +#define DT_GNU_LIBLIST_IDX (DT_GNU_HASH_IDX + 4) +#define DT_GNU_LIBLISTSZ_IDX (DT_GNU_HASH_IDX + 5) +#define DT_CHECKSUM_IDX (DT_GNU_HASH_IDX + 6) +#endif + +extern unsigned int _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], + void *debug_addr, DL_LOADADDR_TYPE load_off); static __always_inline -void __dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], - void *debug_addr, DL_LOADADDR_TYPE load_off) +unsigned int __dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], + void *debug_addr, DL_LOADADDR_TYPE load_off) { + unsigned int rtld_flags = 0; + for (; dpnt->d_tag; dpnt++) { if (dpnt->d_tag < DT_NUM) { dynamic_info[dpnt->d_tag] = dpnt->d_un.d_val; @@ -138,13 +164,30 @@ void __dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], } else if (dpnt->d_tag < DT_LOPROC) { if (dpnt->d_tag == DT_RELOCCOUNT) dynamic_info[DT_RELCONT_IDX] = dpnt->d_un.d_val; - if (dpnt->d_tag == DT_FLAGS_1 && - (dpnt->d_un.d_val & DF_1_NOW)) - dynamic_info[DT_BIND_NOW] = 1; + if (dpnt->d_tag == DT_FLAGS_1) { + if (dpnt->d_un.d_val & DF_1_NOW) + dynamic_info[DT_BIND_NOW] = 1; + if (dpnt->d_un.d_val & DF_1_NODELETE) + rtld_flags |= RTLD_NODELETE; + } #ifdef __LDSO_GNU_HASH_SUPPORT__ if (dpnt->d_tag == DT_GNU_HASH) dynamic_info[DT_GNU_HASH_IDX] = dpnt->d_un.d_ptr; #endif +#ifdef __LDSO_PRELINK_SUPPORT__ + if (dpnt->d_tag == DT_GNU_PRELINKED) + dynamic_info[DT_GNU_PRELINKED_IDX] = dpnt->d_un.d_val; + if (dpnt->d_tag == DT_GNU_CONFLICT) + dynamic_info[DT_GNU_CONFLICT_IDX] = dpnt->d_un.d_ptr; + if (dpnt->d_tag == DT_GNU_CONFLICTSZ) + dynamic_info[DT_GNU_CONFLICTSZ_IDX] = dpnt->d_un.d_val; + if (dpnt->d_tag == DT_GNU_LIBLIST) + dynamic_info[DT_GNU_LIBLIST_IDX] = dpnt->d_un.d_ptr; + if (dpnt->d_tag == DT_GNU_LIBLISTSZ) + dynamic_info[DT_GNU_LIBLISTSZ_IDX] = dpnt->d_un.d_val; + if (dpnt->d_tag == DT_CHECKSUM) + dynamic_info[DT_CHECKSUM_IDX] = dpnt->d_un.d_val; +#endif } #ifdef ARCH_DYNAMIC_INFO else { @@ -157,16 +200,29 @@ void __dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], if (dynamic_info[tag]) \ dynamic_info[tag] = (unsigned long) DL_RELOC_ADDR(load_off, dynamic_info[tag]); \ } while (0) - ADJUST_DYN_INFO(DT_HASH, load_off); - ADJUST_DYN_INFO(DT_PLTGOT, load_off); - ADJUST_DYN_INFO(DT_STRTAB, load_off); - ADJUST_DYN_INFO(DT_SYMTAB, load_off); - ADJUST_DYN_INFO(DT_RELOC_TABLE_ADDR, load_off); - ADJUST_DYN_INFO(DT_JMPREL, load_off); + /* Don't adjust .dynamic unnecessarily. For FDPIC targets, + we'd have to walk all the loadsegs to find out if it was + actually unnecessary, so skip this optimization. */ +#if !defined __FRV_FDPIC__ && !defined __BFIN_FDPIC__ && !defined __DSBT__ + if (load_off != 0) +#endif + { + ADJUST_DYN_INFO(DT_HASH, load_off); + ADJUST_DYN_INFO(DT_PLTGOT, load_off); + ADJUST_DYN_INFO(DT_STRTAB, load_off); + ADJUST_DYN_INFO(DT_SYMTAB, load_off); + ADJUST_DYN_INFO(DT_RELOC_TABLE_ADDR, load_off); + ADJUST_DYN_INFO(DT_JMPREL, load_off); #ifdef __LDSO_GNU_HASH_SUPPORT__ - ADJUST_DYN_INFO(DT_GNU_HASH_IDX, load_off); + ADJUST_DYN_INFO(DT_GNU_HASH_IDX, load_off); +#endif + } +#ifdef __DSBT__ + /* Get the mapped address of the DSBT base. 
*/ + ADJUST_DYN_INFO(DT_DSBT_BASE_IDX, load_off); #endif #undef ADJUST_DYN_INFO + return rtld_flags; } /* Reloc type classes as returned by elf_machine_type_class(). @@ -196,4 +252,4 @@ void __dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], (((X) & PF_X) ? PROT_EXEC : 0)) -#endif /* LINUXELF_H */ +#endif /* _DL_ELF_H */ diff --git a/ldso/include/dl-hash.h b/ldso/include/dl-hash.h index 9b87783fa..d6282bb0c 100644 --- a/ldso/include/dl-hash.h +++ b/ldso/include/dl-hash.h @@ -5,8 +5,8 @@ * GNU Lesser General Public License version 2.1 or later. */ -#ifndef _LD_HASH_H_ -#define _LD_HASH_H_ +#ifndef _DL_HASH_H +#define _DL_HASH_H #ifndef RTLD_NEXT #define RTLD_NEXT ((void*)-1) @@ -25,6 +25,19 @@ struct dyn_elf { struct dyn_elf * prev; }; +struct symbol_ref { + const ElfW(Sym) *sym; + struct elf_resolve *tpnt; +}; + +/* Structure to describe a single list of scope elements. The lookup + functions get passed an array of pointers to such structures. */ +struct r_scope_elem { + struct elf_resolve **r_list; /* Array of maps for the scope. */ + unsigned int r_nlist; /* Number of entries in the scope. */ + struct r_scope_elem *next; +}; + struct elf_resolve { /* These entries must be in this order to be compatible with the interface used by gdb to obtain the list of symbols. */ @@ -34,9 +47,41 @@ struct elf_resolve { struct elf_resolve * next; struct elf_resolve * prev; /* Nothing after this address is used by gdb. */ - ElfW(Addr) mapaddr; /* Address at which ELF segments (either main app and DSO) are mapped into */ + +#if defined(USE_TLS) && USE_TLS + /* Thread-local storage related info. */ + + /* Start of the initialization image. */ + void *l_tls_initimage; + /* Size of the initialization image. */ + size_t l_tls_initimage_size; + /* Size of the TLS block. */ + size_t l_tls_blocksize; + /* Alignment requirement of the TLS block. */ + size_t l_tls_align; + /* Offset of first byte module alignment. */ + size_t l_tls_firstbyte_offset; +# ifndef NO_TLS_OFFSET +# define NO_TLS_OFFSET 0 +# endif + /* For objects present at startup time: offset in the static TLS block. */ + ptrdiff_t l_tls_offset; + /* Index of the module in the dtv array. */ + size_t l_tls_modid; + /* Nonzero if _dl_init_static_tls should be called for this module */ + unsigned int l_need_tls_init:1; + /* Address of TLS descriptor hash table. */ + void *l_tlsdesc_table; +#endif + + ElfW(Addr) mapaddr; +#ifdef __LDSO_STANDALONE_SUPPORT__ + /* Store the entry point from the ELF header (e_entry) */ + ElfW(Addr) l_entry; +#endif enum {elf_lib, elf_executable,program_interpreter, loaded_file} libtype; - struct dyn_elf * symbol_scope; + /* This is the local scope of the shared object */ + struct r_scope_elem symbol_scope; unsigned short usage_count; unsigned short int init_flag; unsigned long rtld_flags; /* RTLD_GLOBAL, RTLD_NOW etc. */ @@ -90,12 +135,18 @@ struct elf_resolve { unsigned long data_words; #endif -#ifdef __FDPIC__ +#if defined(__FRV_FDPIC__) || defined(__BFIN_FDPIC__) /* Every loaded module holds a hashtable of function descriptors of functions defined in it, such that it's easy to release the memory when the module is dlclose()d. 
*/ struct funcdesc_ht *funcdesc_ht; #endif +#ifdef __DSBT__ + /* Information for DSBT */ + void **dsbt_table; + unsigned long dsbt_size; + unsigned long dsbt_index; +#endif }; #define RELOCS_DONE 0x000001 @@ -103,43 +154,21 @@ struct elf_resolve { #define INIT_FUNCS_CALLED 0x000004 #define FINI_FUNCS_CALLED 0x000008 #define DL_OPENED 0x000010 +#define DL_RESERVED 0x000020 extern struct dyn_elf * _dl_symbol_tables; extern struct elf_resolve * _dl_loaded_modules; -extern struct dyn_elf * _dl_handles; +extern struct dyn_elf * _dl_handles; extern struct elf_resolve * _dl_add_elf_hash_table(const char * libname, DL_LOADADDR_TYPE loadaddr, unsigned long * dynamic_info, unsigned long dynamic_addr, unsigned long dynamic_size); -extern char * _dl_lookup_hash(const char * name, struct dyn_elf * rpnt, - struct elf_resolve *mytpnt, int type_class -#ifdef __FDPIC__ - , struct elf_resolve **tpntp -#endif - ); - -static __always_inline char *_dl_find_hash(const char *name, struct dyn_elf *rpnt, - struct elf_resolve *mytpnt, int type_class) -{ -#ifdef __FDPIC__ - return _dl_lookup_hash(name, rpnt, mytpnt, type_class, NULL); -#else - return _dl_lookup_hash(name, rpnt, mytpnt, type_class); -#endif -} - -extern int _dl_linux_dynamic_link(void); +extern char *_dl_find_hash(const char *name, struct r_scope_elem *scope, + struct elf_resolve *mytpnt, int type_class, + struct symbol_ref *symbol); extern char * _dl_library_path; -extern char * _dl_not_lazy; - -static __inline__ int _dl_symbol(char * name) -{ - if (name[0] != '_' || name[1] != 'd' || name[2] != 'l' || name[3] != '_') - return 0; - return 1; -} #define LD_ERROR_NOFILE 1 #define LD_ERROR_NOZERO 2 @@ -148,8 +177,9 @@ static __inline__ int _dl_symbol(char * name) #define LD_ERROR_NOTDYN 5 #define LD_ERROR_MMAP_FAILED 6 #define LD_ERROR_NODYNAMIC 7 -#define LD_WRONG_RELOCS 8 -#define LD_BAD_HANDLE 9 -#define LD_NO_SYMBOL 10 +#define LD_ERROR_TLS_FAILED 8 +#define LD_WRONG_RELOCS 9 +#define LD_BAD_HANDLE 10 +#define LD_NO_SYMBOL 11 -#endif /* _LD_HASH_H_ */ +#endif /* _DL_HASH_H */ diff --git a/ldso/include/dl-string.h b/ldso/include/dl-string.h index 746bd91c6..14ae617c3 100644 --- a/ldso/include/dl-string.h +++ b/ldso/include/dl-string.h @@ -5,12 +5,16 @@ * GNU Lesser General Public License version 2.1 or later. 
*/ -#ifndef _LINUX_STRING_H_ -#define _LINUX_STRING_H_ +#ifndef _DL_STRING_H +#define _DL_STRING_H -#include <dl-sysdep.h> /* for do_rem */ #include <features.h> +#define __need_NULL +#include <stddef.h> + +#include <dl-defs.h> /* for do_rem by dl-sysdep.h */ + /* provide some sane defaults */ #ifndef do_rem # define do_rem(result, n, base) ((result) = (n) % (base)) @@ -19,26 +23,8 @@ # define do_div_10(result, remain) ((result) /= 10) #endif -static size_t _dl_strlen(const char * str); -static char *_dl_strcat(char *dst, const char *src); -static char * _dl_strcpy(char * dst,const char *src); -static int _dl_strcmp(const char * s1,const char * s2); -static int _dl_strncmp(const char * s1,const char * s2,size_t len); -static char * _dl_strchr(const char * str,int c); -static char *_dl_strrchr(const char *str, int c); -static char *_dl_strstr(const char *s1, const char *s2); -static void * _dl_memcpy(void * dst, const void * src, size_t len); -static int _dl_memcmp(const void * s1,const void * s2,size_t len); -static void *_dl_memset(void * str,int c,size_t len); -static char *_dl_get_last_path_component(char *path); -static char *_dl_simple_ltoa(char * local, unsigned long i); -static char *_dl_simple_ltoahex(char * local, unsigned long i); - -#ifndef NULL -#define NULL ((void *) 0) -#endif - -static __always_inline size_t _dl_strlen(const char * str) +#ifdef IS_IN_rtld +static __always_inline size_t _dl_strlen(const char *str) { register const char *ptr = (char *) str-1; while (*++ptr) @@ -59,7 +45,7 @@ static __always_inline char * _dl_strcat(char *dst, const char *src) return dst; } -static __always_inline char * _dl_strcpy(char * dst,const char *src) +static __always_inline char * _dl_strcpy(char *dst, const char *src) { register char *ptr = dst; @@ -70,7 +56,7 @@ static __always_inline char * _dl_strcpy(char * dst,const char *src) return ptr; } -static __always_inline int _dl_strcmp(const char * s1,const char * s2) +static __always_inline int _dl_strcmp(const char *s1, const char *s2) { register unsigned char c1, c2; s1--;s2--; @@ -84,23 +70,7 @@ static __always_inline int _dl_strcmp(const char * s1,const char * s2) return c1 - c2; } -static __always_inline int _dl_strncmp(const char * s1,const char * s2,size_t len) -{ - register unsigned char c1 = '\0'; - register unsigned char c2 = '\0'; - - s1--;s2--; - while (len > 0) { - c1 = (unsigned char) *++s1; - c2 = (unsigned char) *++s2; - if (c1 == '\0' || c1 != c2) - return c1 - c2; - len--; - } - return c1 - c2; -} - -static __always_inline char * _dl_strchr(const char * str,int c) +static __always_inline char * _dl_strchr(const char *str, int c) { register char ch; str--; @@ -147,7 +117,7 @@ static __always_inline char * _dl_strstr(const char *s1, const char *s2) } while (1); } -static __always_inline void * _dl_memcpy(void * dst, const void * src, size_t len) +static __always_inline void * _dl_memcpy(void *dst, const void *src, size_t len) { register char *a = dst-1; register const char *b = src-1; @@ -159,7 +129,7 @@ static __always_inline void * _dl_memcpy(void * dst, const void * src, size_t le return dst; } -static __always_inline int _dl_memcmp(const void * s1,const void * s2,size_t len) +static __always_inline int _dl_memcmp(const void *s1, const void *s2, size_t len) { unsigned char *c1 = (unsigned char *)s1-1; unsigned char *c2 = (unsigned char *)s2-1; @@ -172,7 +142,7 @@ static __always_inline int _dl_memcmp(const void * s1,const void * s2,size_t len return 0; } -#if defined(powerpc) +#if defined(__powerpc__) /* Will generate 
smaller and faster code due to loop unrolling.*/ static __always_inline void * _dl_memset(void *to, int c, size_t n) { @@ -200,7 +170,7 @@ lessthan4: return to; } #else -static __always_inline void * _dl_memset(void * str,int c,size_t len) +static __always_inline void * _dl_memset(void *str, int c, size_t len) { register char *a = str; @@ -228,11 +198,25 @@ static __always_inline char * _dl_get_last_path_component(char *path) ;/* empty */ return ptr == path ? ptr : ptr+1; } - +#else /* IS_IN_rtld */ +# include <string.h> +# define _dl_strlen strlen +# define _dl_strcat strcat +# define _dl_strcpy strcpy +# define _dl_strcmp strcmp +# define _dl_strchr strchr +# define _dl_strrchr strrchr +# define _dl_strstr strstr +# define _dl_memcpy memcpy +# define _dl_memcmp memcmp +# define _dl_memset memset +#endif /* IS_IN_rtld */ + +#if defined IS_IN_rtld || defined __SUPPORT_LD_DEBUG__ /* Early on, we can't call printf, so use this to print out * numbers using the SEND_STDERR() macro. Avoid using mod * or using long division */ -static __always_inline char * _dl_simple_ltoa(char * local, unsigned long i) +static __always_inline char * _dl_simple_ltoa(char *local, unsigned long i) { /* 20 digits plus a null terminator should be good for * 64-bit or smaller ints (2^64 - 1)*/ @@ -246,8 +230,10 @@ static __always_inline char * _dl_simple_ltoa(char * local, unsigned long i) } while (i > 0); return p; } +#endif -static __always_inline char * _dl_simple_ltoahex(char * local, unsigned long i) +#ifdef IS_IN_rtld +static __always_inline char * _dl_simple_ltoahex(char *local, unsigned long i) { /* 16 digits plus a leading "0x" plus a null terminator, * should be good for 64-bit or smaller ints */ @@ -266,9 +252,6 @@ static __always_inline char * _dl_simple_ltoahex(char * local, unsigned long i) return p; } - - - /* The following macros may be used in dl-startup.c to debug * ldso before ldso has fixed itself up to make function calls */ @@ -285,8 +268,8 @@ static __always_inline char * _dl_simple_ltoahex(char * local, unsigned long i) /* On some arches constant strings are referenced through the GOT. * This requires that load_addr must already be defined... */ #if defined(mc68000) || defined(__arm__) || defined(__thumb__) || \ - defined(__mips__) || defined(__sh__) || defined(__powerpc__) || \ - defined(__avr32__) || defined(__xtensa__) + defined(__sh__) || defined(__powerpc__) || \ + defined(__avr32__) || defined(__xtensa__) || defined(__sparc__) || defined(__microblaze__) # define CONSTANT_STRING_GOT_FIXUP(X) \ if ((X) < (const char *) load_addr) (X) += load_addr # define NO_EARLY_SEND_STDERR @@ -362,4 +345,6 @@ static __always_inline char * _dl_simple_ltoahex(char * local, unsigned long i) # define SEND_ADDRESS_STDERR_DEBUG(X, add_a_newline) #endif -#endif +#endif /* IS_IN_rtld */ + +#endif /* _DL_STRING_H */ diff --git a/ldso/include/dl-syscall.h b/ldso/include/dl-syscall.h index 1cbbbad0f..5528ba6a0 100644 --- a/ldso/include/dl-syscall.h +++ b/ldso/include/dl-syscall.h @@ -5,51 +5,40 @@ * GNU Lesser General Public License version 2.1 or later. */ -#ifndef _LD_SYSCALL_H_ -#define _LD_SYSCALL_H_ +#ifndef _DL_SYSCALL_H +#define _DL_SYSCALL_H + +/* We can't use the real errno in ldso, since it has not yet + * been dynamicly linked in yet. 
*/ +#include "sys/syscall.h" +extern int _dl_errno; +#undef __set_errno +#define __set_errno(X) {(_dl_errno) = (X);} /* Pull in the arch specific syscall implementation */ #include <dl-syscalls.h> /* For MAP_ANONYMOUS -- differs between platforms */ #define _SYS_MMAN_H 1 #include <bits/mman.h> + +#ifdef __ARCH_HAS_DEPRECATED_SYSCALLS__ /* Pull in whatever this particular arch's kernel thinks the kernel version of * struct stat should look like. It turns out that each arch has a different * opinion on the subject, and different kernel revs use different names... */ -#if defined(__sparc_v9__) && (__WORDSIZE == 64) -#define kernel_stat64 stat -#else #define kernel_stat stat -#endif #include <bits/kernel_stat.h> #include <bits/kernel_types.h> -/* _dl_open() parameters */ -#define O_RDONLY 00 -#define O_WRONLY 01 -#define O_RDWR 02 -#define O_CREAT 0100 - -/* Encoding of the file mode. */ -#define S_IFMT 0170000 /* These bits determine file type. */ - -/* File types. */ -#define S_IFDIR 0040000 /* Directory. */ -#define S_IFCHR 0020000 /* Character device. */ -#define S_IFBLK 0060000 /* Block device. */ -#define S_IFREG 0100000 /* Regular file. */ -#define S_IFIFO 0010000 /* FIFO. */ -#define S_IFLNK 0120000 /* Symbolic link. */ -#define S_IFSOCK 0140000 /* Socket. */ - /* Protection bits. */ #define S_ISUID 04000 /* Set user ID on execution. */ #define S_ISGID 02000 /* Set group ID on execution. */ -#define S_ISVTX 01000 /* Save swapped text after use (sticky). */ -#define S_IREAD 0400 /* Read by owner. */ -#define S_IWRITE 0200 /* Write by owner. */ -#define S_IEXEC 0100 /* Execute by owner. */ +#else +/* 1. common-generic ABI doesn't need kernel_stat translation + * 3. S_IS?ID already provided by stat.h + */ +#include <sys/stat.h> +#endif /* Here are the definitions for some syscalls that are used @@ -59,107 +48,140 @@ dynamic linking at all, so we cannot return any error codes. We just punt if there is an error. 
*/ #define __NR__dl_exit __NR_exit -static __always_inline _syscall1(void, _dl_exit, int, status); +static __always_inline attribute_noreturn __cold void _dl_exit(int status) +{ + INLINE_SYSCALL(_dl_exit, 1, status); +#if __GNUC_PREREQ(4, 5) + __builtin_unreachable(); /* shut up warning: 'noreturn' function does return*/ +#else + while (1); +#endif +} #define __NR__dl_close __NR_close -static __always_inline _syscall1(int, _dl_close, int, fd); +static __always_inline _syscall1(int, _dl_close, int, fd) -#define __NR__dl_open __NR_open +#if defined __NR_openat && !defined __NR_open +static __always_inline int _dl_open(const char *fn, + int flags, __kernel_mode_t mode) +{ + return INLINE_SYSCALL(openat, 4, AT_FDCWD, fn, flags, mode); +} + +#elif defined __NR_open +# define __NR__dl_open __NR_open static __always_inline _syscall3(int, _dl_open, const char *, fn, int, flags, - __kernel_mode_t, mode); + __kernel_mode_t, mode) +#endif #define __NR__dl_write __NR_write static __always_inline _syscall3(unsigned long, _dl_write, int, fd, - const void *, buf, unsigned long, count); + const void *, buf, unsigned long, count) #define __NR__dl_read __NR_read static __always_inline _syscall3(unsigned long, _dl_read, int, fd, - const void *, buf, unsigned long, count); + const void *, buf, unsigned long, count) #define __NR__dl_mprotect __NR_mprotect static __always_inline _syscall3(int, _dl_mprotect, const void *, addr, - unsigned long, len, int, prot); + unsigned long, len, int, prot) -#define __NR__dl_stat __NR_stat +#if defined __NR_fstatat64 && !defined __NR_stat +# define __NR__dl_fstatat64 __NR_fstatat64 +static __always_inline _syscall4(int, _dl_fstatat64, int, fd, const char *, + fn, struct stat *, stat, int, flags) + +static __always_inline int _dl_stat(const char *file_name, + struct stat *buf) +{ + return _dl_fstatat64(AT_FDCWD, file_name, buf, 0); +} +#elif defined __NR_stat +# define __NR__dl_stat __NR_stat static __always_inline _syscall2(int, _dl_stat, const char *, file_name, - struct stat *, buf); + struct stat *, buf) +#endif -#define __NR__dl_fstat __NR_fstat -static __always_inline _syscall2(int, _dl_fstat, int, fd, struct stat *, buf); +#if defined __NR_fstat64 && !defined __NR_fstat +# define __NR__dl_fstat __NR_fstat64 +#elif defined __NR_fstat +# define __NR__dl_fstat __NR_fstat +#endif +static __always_inline _syscall2(int, _dl_fstat, int, fd, struct stat *, buf) #define __NR__dl_munmap __NR_munmap -static __always_inline _syscall2(int, _dl_munmap, void *, start, unsigned long, length); +static __always_inline _syscall2(int, _dl_munmap, void *, start, unsigned long, length) #ifdef __NR_getxuid # define __NR_getuid __NR_getxuid #endif #define __NR__dl_getuid __NR_getuid -static __always_inline _syscall0(uid_t, _dl_getuid); +static __always_inline _syscall0(uid_t, _dl_getuid) #ifndef __NR_geteuid # define __NR_geteuid __NR_getuid #endif #define __NR__dl_geteuid __NR_geteuid -static __always_inline _syscall0(uid_t, _dl_geteuid); +static __always_inline _syscall0(uid_t, _dl_geteuid) #ifdef __NR_getxgid # define __NR_getgid __NR_getxgid #endif #define __NR__dl_getgid __NR_getgid -static __always_inline _syscall0(gid_t, _dl_getgid); +static __always_inline _syscall0(gid_t, _dl_getgid) #ifndef __NR_getegid # define __NR_getegid __NR_getgid #endif #define __NR__dl_getegid __NR_getegid -static __always_inline _syscall0(gid_t, _dl_getegid); +static __always_inline _syscall0(gid_t, _dl_getegid) #ifdef __NR_getxpid # define __NR_getpid __NR_getxpid #endif #define __NR__dl_getpid __NR_getpid 
-static __always_inline _syscall0(gid_t, _dl_getpid); +static __always_inline _syscall0(gid_t, _dl_getpid) + +#if defined __NR_readlinkat +# define __NR__dl_readlink __NR_readlinkat +static __always_inline _syscall4(int, _dl_readlink, int, id, const char *, path, + char *, buf, size_t, bufsiz) +#endif -#define __NR__dl_readlink __NR_readlink -static __always_inline _syscall3(int, _dl_readlink, const char *, path, char *, buf, - size_t, bufsiz); +#ifdef __NR_pread64 +#define __NR___syscall_pread __NR_pread64 +static __always_inline _syscall5(ssize_t, __syscall_pread, int, fd, void *, buf, + size_t, count, off_t, offset_hi, off_t, offset_lo) + +static __always_inline ssize_t +_dl_pread(int fd, void *buf, size_t count, off_t offset) +{ + return __syscall_pread(fd, buf, count, offset, offset >> 31); +} +#elif defined __NR_pread +#define __NR___syscall_pread __NR_pread +static __always_inline _syscall5(ssize_t, __syscall_pread, int, fd, void *, buf, + size_t, count, off_t, offset_hi, off_t, offset_lo) + +static __always_inline ssize_t +_dl_pread(int fd, void *buf, size_t count, off_t offset) +{ + return __syscall_pread(fd, buf, count, __LONG_LONG_PAIR(offset >> 31, offset)); +} +#endif #ifdef __UCLIBC_HAS_SSP__ # include <sys/time.h> # define __NR__dl_gettimeofday __NR_gettimeofday static __always_inline _syscall2(int, _dl_gettimeofday, struct timeval *, tv, # ifdef __USE_BSD - struct timezone *, tz); + struct timezone * # else - void *, tz); + void * # endif + , tz) #endif - -/* handle all the fun mmap intricacies */ -#if (defined(__UCLIBC_MMAP_HAS_6_ARGS__) && defined(__NR_mmap)) || !defined(__NR_mmap2) -# define _dl_MAX_ERRNO 4096 -# define _dl_mmap_check_error(__res) \ - (((long)__res) < 0 && ((long)__res) >= -_dl_MAX_ERRNO) -#else -# define MAP_FAILED ((void *) -1) -# define _dl_mmap_check_error(X) (((void *)X) == MAP_FAILED) -#endif - -/* first try mmap(), syscall6() style */ -#if defined(__UCLIBC_MMAP_HAS_6_ARGS__) && defined(__NR_mmap) - -# define __NR__dl_mmap __NR_mmap -static __always_inline _syscall6(void *, _dl_mmap, void *, start, size_t, length, - int, prot, int, flags, int, fd, off_t, offset); - -/* then try mmap2() */ -#elif defined(__NR_mmap2) - -# define __NR___syscall_mmap2 __NR_mmap2 -static __always_inline _syscall6(__ptr_t, __syscall_mmap2, __ptr_t, addr, size_t, len, - int, prot, int, flags, int, fd, off_t, offset); - /* Some architectures always use 12 as page shift for mmap2() eventhough the * real PAGE_SHIFT != 12. Other architectures use the same value as * PAGE_SHIFT... 
@@ -168,36 +190,41 @@ static __always_inline _syscall6(__ptr_t, __syscall_mmap2, __ptr_t, addr, size_t # define MMAP2_PAGE_SHIFT 12 #endif -static __always_inline void * _dl_mmap(void * addr, unsigned long size, int prot, - int flags, int fd, unsigned long offset) +#define MAP_FAILED ((void *) -1) +#define _dl_mmap_check_error(X) (((void *)X) == MAP_FAILED) + +static __always_inline +void *_dl_mmap(void *addr, unsigned long size, int prot, + int flags, int fd, unsigned long offset) { +#if defined(__UCLIBC_MMAP_HAS_6_ARGS__) && defined(__NR_mmap) + /* first try mmap(), syscall6() style */ + return (void *)INLINE_SYSCALL(mmap, 6, addr, size, prot, flags, fd, offset); + +#elif defined(__NR_mmap2) && !defined (__mcoldfire__) + /* then try mmap2() */ + unsigned long shifted; + if (offset & ((1 << MMAP2_PAGE_SHIFT) - 1)) return MAP_FAILED; - return __syscall_mmap2(addr, size, prot, flags, - fd, (off_t) (offset >> MMAP2_PAGE_SHIFT)); -} -/* finally, fall back to mmap(), syscall1() style */ -#elif defined(__NR_mmap) + /* gcc needs help with putting things onto the stack */ + shifted = offset >> MMAP2_PAGE_SHIFT; + return (void *)INLINE_SYSCALL(mmap2, 6, addr, size, prot, flags, fd, shifted); -# define __NR__dl_mmap_real __NR_mmap -static __always_inline _syscall1(void *, _dl_mmap_real, unsigned long *, buffer); -static __always_inline void * _dl_mmap(void * addr, unsigned long size, int prot, - int flags, int fd, unsigned long offset) -{ +#elif defined(__NR_mmap) + /* finally, fall back to mmap(), syscall1() style */ unsigned long buffer[6]; - buffer[0] = (unsigned long) addr; buffer[1] = (unsigned long) size; buffer[2] = (unsigned long) prot; buffer[3] = (unsigned long) flags; buffer[4] = (unsigned long) fd; buffer[5] = (unsigned long) offset; - return (void *) _dl_mmap_real(buffer); -} - + return (void *)INLINE_SYSCALL(mmap, 1, buffer); #else # error "Your architecture doesn't seem to provide mmap() !?" #endif +} -#endif /* _LD_SYSCALL_H_ */ +#endif /* _DL_SYSCALL_H */ diff --git a/ldso/include/dlfcn.h b/ldso/include/dlfcn.h index 03afd34fb..5cdd6be53 100644 --- a/ldso/include/dlfcn.h +++ b/ldso/include/dlfcn.h @@ -19,9 +19,9 @@ `dladdr'. */ typedef struct { - __const char *dli_fname; /* File name of defining object. */ + const char *dli_fname; /* File name of defining object. */ void *dli_fbase; /* Load address of that object. */ - __const char *dli_sname; /* Name of nearest symbol. */ + const char *dli_sname; /* Name of nearest symbol. */ void *dli_saddr; /* Exact value of nearest symbol. */ } Dl_info; diff --git a/ldso/include/inline-hashtab.h b/ldso/include/inline-hashtab.h new file mode 100644 index 000000000..4a4812027 --- /dev/null +++ b/ldso/include/inline-hashtab.h @@ -0,0 +1,265 @@ +/* + * The hashcode handling code below is heavily inspired in libiberty's + * hashtab code, but with most adaptation points and support for + * deleting elements removed. + * + * Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. + * Contributed by Vladimir Makarov (vmakarov@cygnus.com). + */ + +#ifndef INLINE_HASHTAB_H +# define INLINE_HASHTAB_H 1 + +static __always_inline unsigned long +higher_prime_number(unsigned long n) +{ + /* These are primes that are near, but slightly smaller than, a power of two. 
*/ + static const unsigned long primes[] = { + 7, + 13, + 31, + 61, + 127, + 251, + 509, + 1021, + 2039, + 4093, + 8191, + 16381, + 32749, + 65521, + 131071, + 262139, + 524287, + 1048573, + 2097143, + 4194301, + 8388593, + 16777213, + 33554393, + 67108859, + 134217689, + 268435399, + 536870909, + 1073741789, + /* 4294967291 */ + ((unsigned long) 2147483647) + ((unsigned long) 2147483644), + }; + const unsigned long *low = &primes[0]; + const unsigned long *high = &primes[ARRAY_SIZE(primes)]; + + while (low != high) { + const unsigned long *mid = low + (high - low) / 2; + if (n > *mid) + low = mid + 1; + else + high = mid; + } + +#if 0 + /* If we've run out of primes, abort. */ + if (n > *low) { + fprintf(stderr, "Cannot find prime bigger than %lu\n", n); + abort(); + } +#endif + + return *low; +} + +struct funcdesc_ht +{ + /* Table itself */ + void **entries; + + /* Current size (in entries) of the hash table */ + size_t size; + + /* Current number of elements */ + size_t n_elements; +}; + +static __always_inline struct funcdesc_ht * +htab_create(void) +{ + struct funcdesc_ht *ht = _dl_malloc(sizeof(*ht)); + size_t ent_size; + + if (!ht) + return NULL; + ht->size = 3; + ent_size = sizeof(void *) * ht->size; + ht->entries = _dl_malloc(ent_size); + if (!ht->entries) + return NULL; + + ht->n_elements = 0; + _dl_memset(ht->entries, 0, ent_size); + + return ht; +} + +/* + * This is only called from _dl_loadaddr_unmap, so it's safe to call + * _dl_free(). See the discussion below. + */ +static __always_inline void +htab_delete(struct funcdesc_ht *htab) +{ + size_t i; + + for (i = htab->size - 1; i >= 0; i--) + if (htab->entries[i]) + _dl_free(htab->entries[i]); + + _dl_free(htab->entries); + _dl_free(htab); +} + +/* + * Similar to htab_find_slot, but without several unwanted side effects: + * - Does not call htab->eq_f when it finds an existing entry. + * - Does not change the count of elements/searches/collisions in the + * hash table. + * This function also assumes there are no deleted entries in the table. + * HASH is the hash value for the element to be inserted. + */ +static __always_inline void ** +find_empty_slot_for_expand(struct funcdesc_ht *htab, int hash) +{ + size_t size = htab->size; + unsigned int index = hash % size; + void **slot = htab->entries + index; + int hash2; + + if (!*slot) + return slot; + + hash2 = 1 + hash % (size - 2); + for (;;) { + index += hash2; + if (index >= size) + index -= size; + + slot = htab->entries + index; + if (!*slot) + return slot; + } +} + +/* + * The following function changes size of memory allocated for the + * entries and repeatedly inserts the table elements. The occupancy + * of the table after the call will be about 50%. Naturally the hash + * table must already exist. Remember also that the place of the + * table entries is changed. If memory allocation failures are allowed, + * this function will return zero, indicating that the table could not be + * expanded. If all goes well, it will return a non-zero value. + */ +static __always_inline int +htab_expand(struct funcdesc_ht *htab, int (*hash_fn) (void *)) +{ + void **oentries; + void **olimit; + void **p; + void **nentries; + size_t nsize; + + oentries = htab->entries; + olimit = oentries + htab->size; + + /* + * Resize only when table after removal of unused elements is either + * too full or too empty. 
+ */ + if (htab->n_elements * 2 > htab->size) + nsize = higher_prime_number(htab->n_elements * 2); + else + nsize = htab->size; + + nentries = _dl_malloc(sizeof(*nentries) * nsize); + _dl_memset(nentries, 0, sizeof(*nentries) * nsize); + if (nentries == NULL) + return 0; + htab->entries = nentries; + htab->size = nsize; + + p = oentries; + do { + if (*p) + *find_empty_slot_for_expand(htab, hash_fn(*p)) = *p; + p++; + } while (p < olimit); + +#if 0 + /* + * We can't tell whether this was allocated by the _dl_malloc() + * built into ld.so or malloc() in the main executable or libc, + * and calling free() for something that wasn't malloc()ed could + * do Very Bad Things (TM). Take the conservative approach + * here, potentially wasting as much memory as actually used by + * the hash table, even if multiple growths occur. That's not + * so bad as to require some overengineered solution that would + * enable us to keep track of how it was allocated. + */ + _dl_free(oentries); +#endif + return 1; +} + +/* + * This function searches for a hash table slot containing an entry + * equal to the given element. To delete an entry, call this with + * INSERT = 0, then call htab_clear_slot on the slot returned (possibly + * after doing some checks). To insert an entry, call this with + * INSERT = 1, then write the value you want into the returned slot. + * When inserting an entry, NULL may be returned if memory allocation + * fails. + */ +static __always_inline void ** +htab_find_slot(struct funcdesc_ht *htab, void *ptr, int insert, + int (*hash_fn)(void *), int (*eq_fn)(void *, void *)) +{ + unsigned int index; + int hash, hash2; + size_t size; + void **entry; + + if (htab->size * 3 <= htab->n_elements * 4 && + htab_expand(htab, hash_fn) == 0) + return NULL; + + hash = hash_fn(ptr); + + size = htab->size; + index = hash % size; + + entry = &htab->entries[index]; + if (!*entry) + goto empty_entry; + else if (eq_fn(*entry, ptr)) + return entry; + + hash2 = 1 + hash % (size - 2); + for (;;) { + index += hash2; + if (index >= size) + index -= size; + + entry = &htab->entries[index]; + if (!*entry) + goto empty_entry; + else if (eq_fn(*entry, ptr)) + return entry; + } + + empty_entry: + if (!insert) + return NULL; + + htab->n_elements++; + return entry; +} + +#endif diff --git a/ldso/include/ldso.h b/ldso/include/ldso.h index 35a72fc5e..e237885b9 100644 --- a/ldso/include/ldso.h +++ b/ldso/include/ldso.h @@ -5,8 +5,8 @@ * GNU Lesser General Public License version 2.1 or later. 
*/ -#ifndef _LDSO_H_ -#define _LDSO_H_ +#ifndef _LDSO_H +#define _LDSO_H #include <features.h> @@ -27,18 +27,46 @@ /* Pull in compiler and arch stuff */ #include <stdlib.h> #include <stdarg.h> +#include <stddef.h> /* for ptrdiff_t */ +#include <stdbool.h> +#define _FCNTL_H +/* We need this if arch has only new syscalls defined */ +#ifndef AT_FDCWD +#define AT_FDCWD -100 +#endif /* AT_FDCWD */ +#include <bits/fcntl.h> #include <bits/wordsize.h> /* Pull in the arch specific type information */ #include <sys/types.h> /* Pull in the arch specific page size */ #include <bits/uClibc_page.h> +/* Pull in the MIN macro */ +#include <sys/param.h> /* Pull in the ldso syscalls and string functions */ +#ifndef __ARCH_HAS_NO_SHARED__ #include <dl-syscall.h> #include <dl-string.h> /* Now the ldso specific headers */ #include <dl-elf.h> +#ifdef __UCLIBC_HAS_TLS__ +/* Defines USE_TLS */ +#include <tls.h> +#endif #include <dl-hash.h> +/* common align masks, if not specified by sysdep headers */ +#ifndef ADDR_ALIGN +#define ADDR_ALIGN (_dl_pagesize - 1) +#endif + +#ifndef PAGE_ALIGN +#define PAGE_ALIGN (~ADDR_ALIGN) +#endif + +#ifndef OFFS_ALIGN +#define OFFS_ALIGN (PAGE_ALIGN & ~(1ul << (sizeof(_dl_pagesize) * 8 - 1))) +#endif + /* For INIT/FINI dependency sorting. */ struct init_fini_list { struct init_fini_list *next; @@ -48,10 +76,25 @@ struct init_fini_list { /* Global variables used within the shared library loader */ extern char *_dl_library_path; /* Where we look for libraries */ extern char *_dl_preload; /* Things to be loaded before the libs */ -extern char *_dl_ldsopath; /* Where the shared lib loader was found */ +#ifdef __LDSO_SEARCH_INTERP_PATH__ +extern const char *_dl_ldsopath; /* Where the shared lib loader was found */ +#endif extern const char *_dl_progname; /* The name of the executable being run */ -extern int _dl_secure; /* Are we dealing with setuid stuff? */ extern size_t _dl_pagesize; /* Store the page size for use later */ +#ifdef __LDSO_PRELINK_SUPPORT__ +extern char *_dl_trace_prelink; /* Library for prelinking trace */ +extern struct elf_resolve *_dl_trace_prelink_map; /* Library map for prelinking trace */ +#else +#define _dl_trace_prelink 0 +#endif +#ifdef __DSBT__ +extern void **_dl_ldso_dsbt; +#endif + +#if defined(USE_TLS) && USE_TLS +extern void _dl_add_to_slotinfo (struct link_map *l); +extern void ** __attribute__ ((const)) _dl_initial_error_catch_tsd (void); +#endif #ifdef __SUPPORT_LD_DEBUG__ extern char *_dl_debug; @@ -63,13 +106,14 @@ extern char *_dl_debug_nofixups; extern char *_dl_debug_bindings; extern int _dl_debug_file; # define __dl_debug_dprint(fmt, args...) \ - _dl_dprintf(_dl_debug_file, "%s:%i: " fmt, __FUNCTION__, __LINE__, ## args); + _dl_dprintf(_dl_debug_file, "%s:%i: " fmt, __func__, __LINE__, ## args); # define _dl_if_debug_dprint(fmt, args...) \ do { if (_dl_debug) __dl_debug_dprint(fmt, ## args); } while (0) #else # define __dl_debug_dprint(fmt, args...) do {} while (0) # define _dl_if_debug_dprint(fmt, args...) 
do {} while (0) -# define _dl_debug_file 2 +/* disabled on purpose, _dl_debug_file should be guarded by __SUPPORT_LD_DEBUG__ +# define _dl_debug_file 2*/ #endif /* __SUPPORT_LD_DEBUG__ */ #ifdef IS_IN_rtld @@ -100,11 +144,24 @@ extern int _dl_debug_file; #endif extern void *_dl_malloc(size_t size); +extern void *_dl_calloc(size_t __nmemb, size_t __size); +extern void *_dl_realloc(void *__ptr, size_t __size); extern void _dl_free(void *); extern char *_dl_getenv(const char *symbol, char **envp); extern void _dl_unsetenv(const char *symbol, char **envp); +#ifdef IS_IN_rtld extern char *_dl_strdup(const char *string); extern void _dl_dprintf(int, const char *, ...); +#else +# include <string.h> +# define _dl_strdup strdup +# include <stdio.h> +# ifdef __USE_GNU +# define _dl_dprintf dprintf +# else +# define _dl_dprintf(fd, fmt, args...) fprintf(stderr, fmt, ## args) +# endif +#endif #ifndef DL_GET_READY_TO_RUN_EXTRA_PARMS # define DL_GET_READY_TO_RUN_EXTRA_PARMS @@ -113,7 +170,7 @@ extern void _dl_dprintf(int, const char *, ...); # define DL_GET_READY_TO_RUN_EXTRA_ARGS #endif -extern void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, +extern void *_dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, ElfW(auxv_t) auxvt[AT_EGID + 1], char **envp, char **argv DL_GET_READY_TO_RUN_EXTRA_PARMS); @@ -121,4 +178,8 @@ extern void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load #include <dl-inlines.h> #endif -#endif /* _LDSO_H_ */ +#else /* __ARCH_HAS_NO_SHARED__ */ +#include <dl-defs.h> +#endif + +#endif /* _LDSO_H */ diff --git a/ldso/include/ldsodefs.h b/ldso/include/ldsodefs.h new file mode 100644 index 000000000..9ae645c60 --- /dev/null +++ b/ldso/include/ldsodefs.h @@ -0,0 +1,154 @@ +#ifndef _LDSODEFS_H +#define _LDSODEFS_H 1 + +#include <bits/kernel-features.h> + +#include <features.h> +#include <tls.h> + +#ifdef __mips__ +/* The MIPS ABI specifies that the dynamic section has to be read-only. */ + +#define DL_RO_DYN_SECTION 1 + +/* TODO: Import in 64-bit relocations from glibc. */ +#endif + +#ifndef SHARED +# define EXTERN extern +#else +# ifdef IS_IN_rtld +# define EXTERN +# else +# define EXTERN extern +# endif +#endif + +/* Non-shared code has no support for multiple namespaces. */ +#ifdef SHARED +# define DL_NNS 16 +#else +# define DL_NNS 1 +#endif + +#define GL(x) _##x +#define GLRO(x) _##x + +/* Variable pointing to the end of the stack (or close to it). This value + must be constant over the runtime of the application. Some programs + might use the variable which results in copy relocations on some + platforms. But this does not matter, ld.so can always use the local + copy. */ +extern void *__libc_stack_end; + +/* Determine next available module ID. */ +extern size_t _dl_next_tls_modid (void) internal_function attribute_hidden; + +/* Calculate offset of the TLS blocks in the static TLS block. */ +extern void _dl_determine_tlsoffset (void) internal_function attribute_hidden; + +/* Set up the data structures for TLS, when they were not set up at startup. + Returns nonzero on malloc failure. + This is called from _dl_map_object_from_fd or by libpthread. */ +extern int _dl_tls_setup (void) internal_function; +rtld_hidden_proto (_dl_tls_setup) + +/* Allocate memory for static TLS block (unless MEM is nonzero) and dtv. */ +extern void *_dl_allocate_tls (void *mem) internal_function; + +/* Get size and alignment requirements of the static TLS block. 
*/ +extern void _dl_get_tls_static_info (size_t *sizep, size_t *alignp) + internal_function; + +extern void _dl_allocate_static_tls (struct link_map *map) + internal_function attribute_hidden; +extern int _dl_try_allocate_static_tls (struct link_map* map) + internal_function attribute_hidden; + +/* Taken from glibc/elf/dl-reloc.c */ +#define CHECK_STATIC_TLS(sym_map) \ + do { \ + if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET, 0)) \ + _dl_allocate_static_tls (sym_map); \ + } while (0) +#define TRY_STATIC_TLS(sym_map) \ + (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \ + || _dl_try_allocate_static_tls (sym_map) == 0) + +/* These are internal entry points to the two halves of _dl_allocate_tls, + only used within rtld.c itself at startup time. */ +extern void *_dl_allocate_tls_storage (void) + internal_function attribute_hidden; +extern void *_dl_allocate_tls_init (void *) internal_function; + +/* Deallocate memory allocated with _dl_allocate_tls. */ +extern void _dl_deallocate_tls (void *tcb, bool dealloc_tcb) internal_function; + +extern void _dl_nothread_init_static_tls (struct link_map *) attribute_hidden; + +/* Highest dtv index currently needed. */ +EXTERN size_t _dl_tls_max_dtv_idx; +/* Flag signalling whether there are gaps in the module ID allocation. */ +EXTERN bool _dl_tls_dtv_gaps; +/* Information about the dtv slots. */ +EXTERN struct dtv_slotinfo_list +{ + size_t len; + struct dtv_slotinfo_list *next; + struct dtv_slotinfo + { + size_t gen; + bool is_static; + struct link_map *map; + } slotinfo[0]; +} *_dl_tls_dtv_slotinfo_list; +/* Number of modules in the static TLS block. */ +EXTERN size_t _dl_tls_static_nelem; +/* Size of the static TLS block. */ +EXTERN size_t _dl_tls_static_size; +/* Size actually allocated in the static TLS block. */ +EXTERN size_t _dl_tls_static_used; +/* Alignment requirement of the static TLS block. */ +EXTERN size_t _dl_tls_static_align; +/* Function pointer for catching TLS errors. */ +#if 1 /* def _LIBC_REENTRANT */ +EXTERN void **(*_dl_error_catch_tsd) (void) __attribute__ ((const)); +#endif + +/* Number of additional entries in the slotinfo array of each slotinfo + list element. A large number makes it almost certain take we never + have to iterate beyond the first element in the slotinfo list. */ +# define TLS_SLOTINFO_SURPLUS (62) + +/* Number of additional slots in the dtv allocated. */ +# define DTV_SURPLUS (14) + +/* Initial dtv of the main thread, not allocated with normal malloc. */ +EXTERN void *_dl_initial_dtv; +/* Generation counter for the dtv. */ +EXTERN size_t _dl_tls_generation; + +EXTERN void (*_dl_init_static_tls) (struct link_map *); + +/* We have the auxiliary vector. */ +#define HAVE_AUX_VECTOR + +/* We can assume that the kernel always provides the AT_UID, AT_EUID, + AT_GID, and AT_EGID values in the auxiliary vector from 2.4.0 or so on. */ +#if __ASSUME_AT_XID +# define HAVE_AUX_XID +#endif + +/* We can assume that the kernel always provides the AT_SECURE value + in the auxiliary vector from 2.5.74 or so on. */ +#if __ASSUME_AT_SECURE +# define HAVE_AUX_SECURE +#endif + +/* Starting with one of the 2.4.0 pre-releases the Linux kernel passes + up the page size information. */ +#if __ASSUME_AT_PAGESIZE +# define HAVE_AUX_PAGESIZE +#endif + +#endif diff --git a/ldso/include/tlsdeschtab.h b/ldso/include/tlsdeschtab.h new file mode 100644 index 000000000..056f859b7 --- /dev/null +++ b/ldso/include/tlsdeschtab.h @@ -0,0 +1,121 @@ +/* Hash table for TLS descriptors. 
+ Copyright (C) 2005-2013 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Alexandre Oliva <aoliva@redhat.com> + + uClibc port by Baruch Siach <baruch@tkos.co.il> + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef TLSDESCHTAB_H +# define TLSDESCHTAB_H 1 + +# ifdef SHARED + +# include <inline-hashtab.h> + +inline static int +hash_tlsdesc (void *p) +{ + struct tlsdesc_dynamic_arg *td = p; + + /* We know all entries are for the same module, so ti_offset is the + only distinguishing entry. */ + return td->tlsinfo.ti_offset; +} + +inline static int +eq_tlsdesc (void *p, void *q) +{ + struct tlsdesc_dynamic_arg *tdp = p, *tdq = q; + + return tdp->tlsinfo.ti_offset == tdq->tlsinfo.ti_offset; +} + +inline static int +map_generation (struct link_map *map) +{ + size_t idx = map->l_tls_modid; + struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list); + + /* Find the place in the dtv slotinfo list. */ + do + { + /* Does it fit in the array of this list element? */ + if (idx < listp->len) + { + /* We should never get here for a module in static TLS, so + we can assume that, if the generation count is zero, we + still haven't determined the generation count for this + module. */ + if (listp->slotinfo[idx].gen) + return listp->slotinfo[idx].gen; + else + break; + } + idx -= listp->len; + listp = listp->next; + } + while (listp != NULL); + + /* If we get to this point, the module still hasn't been assigned an + entry in the dtv slotinfo data structures, and it will when we're + done with relocations. At that point, the module will get a + generation number that is one past the current generation, so + return exactly that. */ + return GL(dl_tls_generation) + 1; +} + +void * +internal_function +_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset) +{ + struct funcdesc_ht *ht; + void **entry; + struct tlsdesc_dynamic_arg *td, test; + + ht = map->l_tlsdesc_table; + if (! ht) + { + ht = htab_create (); + if (! ht) + return 0; + map->l_tlsdesc_table = ht; + } + + test.tlsinfo.ti_module = map->l_tls_modid; + test.tlsinfo.ti_offset = ti_offset; + entry = htab_find_slot (ht, &test, 1, hash_tlsdesc, eq_tlsdesc); + if (entry == NULL) + _dl_exit(1); + if (*entry) + { + td = *entry; + return td; + } + + *entry = td = _dl_malloc (sizeof (struct tlsdesc_dynamic_arg)); + /* This may be higher than the map's generation, but it doesn't + matter much. Worst case, we'll have one extra DTV update per + thread. 
*/ + td->gen_count = map_generation (map); + td->tlsinfo = test.tlsinfo; + + return td; +} + +# endif /* SHARED */ + +#endif diff --git a/ldso/ldso/Makefile.in b/ldso/ldso/Makefile.in index 88b254c27..d85646a1a 100644 --- a/ldso/ldso/Makefile.in +++ b/ldso/ldso/Makefile.in @@ -1,28 +1,51 @@ # Makefile for uClibc # -# Copyright (C) 2000-2005 Erik Andersen <andersen@uclibc.org> +# Copyright (C) 2000-2008 Erik Andersen <andersen@uclibc.org> # # Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. # -CFLAGS-ldso := -DNOT_IN_libc -DIS_IN_rtld $(SSP_DISABLE_FLAGS) +subdirs += ldso/ldso/$(TARGET_ARCH) +CFLAGS-rtld := -DNOT_IN_libc -DIS_IN_rtld $(SSP_DISABLE_FLAGS) + +ifneq ($(TARGET_ARCH),arc) # This stuff will not work with -fomit-frame-pointer -CFLAGS-ldso += -fno-omit-frame-pointer +CFLAGS-rtld += -fno-omit-frame-pointer +endif + +ifeq ($(DODEBUG),y) +ifeq ($(TARGET_ARCH),arm) +# This stuff will not work with -funwind-tables / -fasynchronous-unwind-tables +CFLAGS-rtld += -fno-unwind-tables -fno-asynchronous-unwind-tables +endif +endif -CFLAGS-ldso += -I$(top_srcdir)ldso/ldso/$(TARGET_ARCH) -I$(top_srcdir)ldso/include -I$(top_srcdir)ldso/ldso -CFLAGS-ldso += -DUCLIBC_RUNTIME_PREFIX=\"$(RUNTIME_PREFIX)\" -DUCLIBC_LDSO=\"$(UCLIBC_LDSO)\" +CFLAGS-rtld += -I$(top_srcdir)ldso/ldso/$(TARGET_ARCH) -I$(top_srcdir)ldso/include -I$(top_srcdir)ldso/ldso +CFLAGS-rtld += -DUCLIBC_RUNTIME_PREFIX=\"$(RUNTIME_PREFIX)\" -DUCLIBC_LDSO=\"$(UCLIBC_LDSO)\" -CFLAGS-ldso/ldso/$(TARGET_ARCH)/ := $(CFLAGS-ldso) +# Not really much point in including debugging info, since gdb +# can't really debug ldso, since gdb requires help from ldso to +# debug things.... +# On arm, gcc-4.3.x onwards -Os emits calls to libgcc, which calls _div0, +# which tries to call raise(). And raise comes from libc so a catch 22. +# Using -O2 instead. We could have use -fno-early-inlining with -Os too. 
+CFLAGS-$(DODEBUG)-ldso/ldso := -O2 -g -CFLAGS-ldso.c := -DLDSO_ELFINTERP=\"$(TARGET_ARCH)/elfinterp.c\" $(CFLAGS-ldso) +CFLAGS-ldso.c := -DLDSO_ELFINTERP=\"$(TARGET_ARCH)/elfinterp.c\" +LDFLAGS-$(UCLIBC_FORMAT_DSBT_ELF)-$(UCLIBC_LDSO_NAME).so := -Wl,--dsbt-index=1 ifneq ($(SUPPORT_LD_DEBUG),y) LDFLAGS-$(UCLIBC_LDSO_NAME).so := $(LDFLAGS) else LDFLAGS-$(UCLIBC_LDSO_NAME).so := $(LDFLAGS_NOSTRIP) -Wl,-z,defs endif -LDFLAGS-$(UCLIBC_LDSO_NAME).so += -Wl,-e,_start -Wl,-z,now -Wl,-Bsymbolic -Wl,--export-dynamic -Wl,--sort-common -Wl,--discard-locals -Wl,--discard-all -Wl,--no-undefined +LDFLAGS-$(UCLIBC_LDSO_NAME).so += -Wl,-e,$(SYMBOL_PREFIX)_start \ + -Wl,-z,now -Wl,-Bsymbolic \ + -Wl,--export-dynamic $(CFLAG_-Wl--sort-common) -Wl,--discard-locals \ + $(CFLAG_-Wl--discard-all) -Wl,--no-undefined + +LDFLAGS-$(LDSO_PRELINK_SUPPORT)-$(UCLIBC_LDSO_NAME).so += -Wl,-defsym=$(SYMBOL_PREFIX)_begin=0 ldso_FULL_NAME := $(UCLIBC_LDSO_NAME)-$(VERSION).so @@ -47,15 +70,18 @@ $(UCLIBC_LDSO_NAME)_OBJS := $($(UCLIBC_LDSO_NAME)_COBJ) $($(UCLIBC_LDSO_NAME)_SO ldso-y := $($(UCLIBC_LDSO_NAME)_OBJS:.o=.oS) lib-so-y += $(ldso) -objclean-y += $(UCLIBC_LDSO_NAME)_clean +objclean-y += CLEAN_ldso/ldso -$(ldso): $(ldso:.$(MAJOR_VERSION)=) -$(ldso:.$(MAJOR_VERSION)=): $($(UCLIBC_LDSO_NAME)_OUT)/$(UCLIBC_LDSO_NAME)_so.a - $(call link.so,$(ldso_FULL_NAME),$(MAJOR_VERSION)) +$(ldso): $(ldso:.$(ABI_VERSION)=) +$(ldso:.$(ABI_VERSION)=): | $(top_builddir)lib +$(ldso:.$(ABI_VERSION)=): $($(UCLIBC_LDSO_NAME)_OUT)/$(UCLIBC_LDSO_NAME)_so.a + $(call link.so,$(ldso_FULL_NAME),$(ABI_VERSION)) + # link for backward compatibility + (cd $(top_builddir)lib; ln -sf $(UCLIBC_LDSO_NAME).so.$(ABI_VERSION) $(UCLIBC_LDSO_NAME).so.0 ) $($(UCLIBC_LDSO_NAME)_OUT)/$(UCLIBC_LDSO_NAME)_so.a: $(ldso-y) $(Q)$(RM) $@ $(do_ar) -$(UCLIBC_LDSO_NAME)_clean: - $(RM) $($(UCLIBC_LDSO_NAME)_OUT)/*.{o,os,oS,a} $($(UCLIBC_LDSO_NAME)_OUT)/*/*.{o,os,oS} +CLEAN_ldso/ldso: + $(do_rm) $(addprefix $($(UCLIBC_LDSO_NAME)_OUT)/,$(foreach e, o os oS a,$(foreach d, *. */*.,$(d)$(e)))) diff --git a/ldso/ldso/arc/dl-debug.h b/ldso/ldso/arc/dl-debug.h new file mode 100644 index 000000000..6573e5452 --- /dev/null +++ b/ldso/ldso/arc/dl-debug.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball. 
+ */ +static const char *_dl_reltypes_tab[] = +{ + "R_ARC_NONE", /* 0 */ + "R_ARC_8", + "R_ARC_16", + "R_ARC_24", + "R_ARC_32", + "R_ARC_B26", /* 5 */ + "R_ARC_B22_PCREL", + "R_ARC_H30", + "R_ARC_N8", + "R_ARC_N16", + "R_ARC_N24", /* 10 */ + "R_ARC_N32", + "R_ARC_SDA", + "R_ARC_SECTOFF", + "R_ARC_S21H_PCREL", + "R_ARC_S21W_PCREL", /* 15 */ + "R_ARC_S25H_PCREL", + "R_ARC_S25W_PCREL", + "R_ARC_SDA32", + "R_ARC_SDA_LDST", + "R_ARC_SDA_LDST1", /* 20 */ + "R_ARC_SDA_LDST2", + "R_ARC_SDA16_LD", + "R_ARC_SDA16_LD1", + "R_ARC_SDA16_LD2", + "R_ARC_S13_PCREL", /* 25 */ + "R_ARC_W", + "R_ARC_32_ME", + "R_ARC_N32_ME", + "R_ARC_SECTOFF_ME", + "R_ARC_SDA32_ME", /* 30 */ + "R_ARC_W_ME", + "R_ARC_H30_ME", + "R_ARC_SECTOFF_U8", + "R_ARC_SECTOFF_S9", + "R_AC_SECTOFF_U8", /* 35 */ + "R_AC_SECTOFF_U8_1", + "R_AC_SECTOFF_U8_2", + "R_AC_SECTOFF_S9", + "R_AC_SECTOFF_S9_1", + "R_AC_SECTOFF_S9_2", /* 40 */ + "R_ARC_SECTOFF_ME_1", + "R_ARC_SECTOFF_ME_2", + "R_ARC_SECTOFF_1", + "R_ARC_SECTOFF_2", + "", /* 45 */ + "", + "", + "", + "", + "R_ARC_PC32", /* 50 */ + "R_ARC_GOTPC32", + "R_ARC_PLT32", + "R_ARC_COPY", + "R_ARC_GLOB_DAT", + "R_ARC_JMP_SLOT", /* 55 */ + "R_ARC_RELATIVE", + "R_ARC_GOTOFF", + "R_ARC_GOTPC", + "R_ARC_GOT32", + "", /* 60 */ + "", + "", + "", + "", + "", /* 65 */ + "R_ARC_TLS_DTPMOD", + "R_ARC_TLS_DTPOFF", + "R_ARC_TLS_TPOFF", + "R_ARC_TLS_GD_GOT", + "R_ARC_TLS_GD_LD", /* 70 */ + "R_ARC_TLS_GD_CALL", + "R_ARC_TLS_IE_GOT", + "", + "", +}; diff --git a/ldso/ldso/arc/dl-startup.h b/ldso/ldso/arc/dl-startup.h new file mode 100644 index 000000000..ef89b5317 --- /dev/null +++ b/ldso/ldso/arc/dl-startup.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ + +/* + * vineetg: Refactoring/cleanup of loader entry point + * Removed 6 useless insns + * Joern Improved it even further: + * -better insn scheduling + * -no need for conditional code for _dl_skip_args + * -use of assembler .&2 expressions vs. @gotpc refs (avoids need for GP) + * + * What this code does: + * -ldso starts execution here when kernel returns from execve() + * -calls into generic ldso entry point _dl_start( ) + * -optionally adjusts argc for executable if exec passed as cmd + * -calls into app main with address of finaliser + */ +__asm__( + ".section .text \n" + ".align 4 \n" + ".global _start \n" + ".hidden _start \n" + ".type _start,@function \n" + + "_start: \n" + " ; ldso entry point, returns app entry point \n" + " bl.d _dl_start \n" + " mov_s r0, sp ; pass ptr to aux vector tbl \n" + + " ; If ldso ran as cmd with executable file nm as arg \n" + " ; skip the extra args calc by dl_start() \n" + " ld_s r1, [sp] ; orig argc from aux-vec Tbl \n" + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ + " ld r12, [pcl, _dl_skip_args@pcl] \n" + + " add r2, pcl, _dl_fini@pcl ; finalizer \n" +#else + " add r12, pcl, _dl_skip_args-.+(.&2) \n" + " ld r12, [r12] \n" + " add r2, pcl, _dl_fini-.+(.&2) ; finalizer \n" +#endif + + " add2 sp, sp, r12 ; discard argv entries from stack\n" + " sub_s r1, r1, r12 ; adjusted argc, on stack \n" + " st_s r1, [sp] \n" + + " j_s.d [r0] ; app entry point \n" + " mov_s r0, r2 ; ptr to finalizer _dl_fini \n" + + ".size _start,.-_start \n" + ".previous \n" +); + +/* + * Get a pointer to the argv array. On many platforms this can be just + * the address if the first argument, on other platforms we need to + * do something a little more subtle here. 
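+ *
+ * On ARC (as on Linux generally) the kernel enters _start with argc at the
+ * top of the stack and the argv pointers immediately above it, so the
+ * layout the macro below relies on is roughly:
+ *
+ *     sp -> [ argc     ]
+ *           [ argv[0]  ]   <- what GET_ARGV() returns
+ *           [ ...      ]
+ *           [ NULL     ]
+ *           [ envp[0]  ] ...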
+ */ +#define GET_ARGV(ARGVP, ARGS) ARGVP = ((unsigned long*) ARGS + 1) + +/* + * Dynamic loader bootstrapping: + * Since we don't modify text at runtime, these can only be data relos + * (so safe to assume that they are word aligned). + * And also they HAVE to be RELATIVE relos only + * @RELP is the relo entry being processed + * @REL is the pointer to the address we are relocating. + * @SYMBOL is the symbol involved in the relocation + * @LOAD is the load address. + */ + +#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \ +do { \ + int type = ELF32_R_TYPE((RELP)->r_info); \ + if (likely(type == R_ARC_RELATIVE)) \ + *REL += (unsigned long) LOAD; \ + else \ + _dl_exit(1); \ +}while(0) + +/* + * This will go away once we have DT_RELACOUNT + */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + +/* we dont need to spit out argc, argv etc for debugging */ +#define NO_EARLY_SEND_STDERR 1 diff --git a/ldso/ldso/arc/dl-syscalls.h b/ldso/ldso/arc/dl-syscalls.h new file mode 100644 index 000000000..a3cbb011b --- /dev/null +++ b/ldso/ldso/arc/dl-syscalls.h @@ -0,0 +1,7 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ + +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/arc/dl-sysdep.h b/ldso/ldso/arc/dl-sysdep.h new file mode 100644 index 000000000..b6bda9d14 --- /dev/null +++ b/ldso/ldso/arc/dl-sysdep.h @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ + +#include "elf.h" + +/* + * Define this if the system uses RELOCA. + */ +#define ELF_USES_RELOCA + +/* + * Dynamic Linking ABI for ARCompact ISA + * + * PLT + * -------------------------------- + * | ld r11, [pcl, off-to-GOT[1] | 0 (20 bytes) + * | | 4 + * plt0 | ld r10, [pcl, off-to-GOT[2] | 8 + * | | 12 + * | j [r10] | 16 + * -------------------------------- + * | Base address of GOT | 20 + * -------------------------------- + * | ld r12, [pcl, off-to-GOT[3] | 24 (12 bytes each) + * plt1 | | + * | j_s.d [r12] | 32 + * | mov_s r12, pcl | 34 + * -------------------------------- + * | | 36 + * ~ ~ + * ~ ~ + * | | + * -------------------------------- + * + * GOT + * -------------- + * | [0] | + * -------------- + * | [1] | Module info - setup by ldso + * -------------- + * | [2] | resolver entry point + * -------------- + * | [3] | + * | ... | Runtime address for function symbols + * | [f] | + * -------------- + * | [f+1] | + * | ... | Runtime address for data symbols + * | [last] | + * -------------- + */ + +/* + * Initialization sequence for a GOT. + * Caller elf_resolve() seeds @GOT_BASE from DT_PLTGOT - which essentially is + * pointer to first PLT entry. 
The actual GOT base is 5th word in PLT + * + */ +#define INIT_GOT(GOT_BASE,MODULE) \ +do { \ + unsigned long *__plt_base = (unsigned long *)GOT_BASE; \ + GOT_BASE = (unsigned long *)(__plt_base[5] + \ + (unsigned long)MODULE->loadaddr); \ + GOT_BASE[1] = (unsigned long) MODULE; \ + GOT_BASE[2] = (unsigned long) _dl_linux_resolve; \ +} while(0) + +/* Here we define the magic numbers that this dynamic loader should accept */ +#ifdef __A7__ +#define MAGIC1 EM_ARCOMPACT +#define ELF_TARGET "ARCompact" /* For error messages */ +#elif defined(__HS__) +#define MAGIC1 EM_ARCV2 +#define ELF_TARGET "ARCv2" /* For error messages */ +#endif + +#undef MAGIC2 + + +struct elf_resolve; +extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, + unsigned int plt_pc); + +extern unsigned __udivmodsi4(unsigned, unsigned) attribute_hidden; + +#ifdef __A7__ +/* using "C" causes an indirection via __umodsi3 -> __udivmodsi4 */ +#define do_rem(result, n, base) ((result) = \ + \ + __builtin_constant_p (base) ? (n) % (unsigned) (base) : \ + __extension__ ({ \ + register unsigned r1 __asm__ ("r1") = (base); \ + \ + __asm__("bl.d @__udivmodsi4` mov r0,%1" \ + : "=r" (r1) \ + : "r" (n), "r" (r1) \ + : "r0", "r2", "r3", "r4", "lp_count", "blink", "cc"); \ + \ + r1; \ + }) \ +) +#elif defined(__HS__) +/* ARCv2 has hardware assisted divide/mod */ +#define do_rem(result, n, base) ((result) = (n) % (unsigned) (base)) +#endif + +/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or + TLS variable so PLT entries should not be allowed to define the value. + + ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one + of the main executable's symbols, as for a COPY reloc. */ +#define elf_machine_type_class(type) \ + ((((type) == R_ARC_JMP_SLOT || (type) == R_ARC_TLS_DTPMOD || \ + (type) == R_ARC_TLS_DTPOFF || (type) == R_ARC_TLS_TPOFF) \ + * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_ARC_COPY) * ELF_RTYPE_CLASS_COPY)) + +/* + * Get the runtime address of GOT[0] + */ +static __always_inline Elf32_Addr elf_machine_dynamic(void) +{ + Elf32_Addr dyn; + + __asm__("ld %0,[pcl,_DYNAMIC@gotpc]\n\t" : "=r" (dyn)); + return dyn; +} + +/* Return the run-time load address of the shared object. */ +static __always_inline Elf32_Addr elf_machine_load_address(void) +{ +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ + /* To find the loadaddr we subtract the runtime addr of a non-local symbol + * say _DYNAMIC from it's build-time addr. + * N.B., gotpc loads get optimized by the linker if it finds the symbol + * is resolved locally. + * A more robust - and efficient - solution would be to use a symbol + * set by the linker. To make it actually save space, we'd have to + * suppress the unwanted text relocation in the linked dso, though. + * (I.e. in ldso.so.*, though it's just another dso as far as bfd/ld + * are concerned.) 
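	 *
	 * Concretely, assuming the GOT entry still holds the link-time value
	 * (which is the case here, before the loader has relocated itself):
	 *
	 *     load_off = &_DYNAMIC (runtime, formed pc-relative)
	 *              - _DYNAMIC  (link-time, read via the @gotpc entry)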
+ */ + Elf32_Addr addr, tmp; + __asm__ ( + "ld %1, [pcl, _DYNAMIC@gotpc] ;build addr of _DYNAMIC" "\n" + "add %0, pcl, _DYNAMIC@pcl ;runtime addr of _DYNAMIC" "\n" + "sub %0, %0, %1 ;delta" "\n" + : "=&r" (addr), "=r"(tmp) + ); +#else + Elf32_Addr addr, tmp; + __asm__ ( + "ld %1, [pcl, _dl_start@gotpc] ;build addr of _dl_start \n" + "add %0, pcl, _dl_start-.+(.&2) ;runtime addr of _dl_start \n" + "sub %0, %0, %1 ;delta \n" + : "=&r" (addr), "=r"(tmp) + ); +#endif + return addr; +} + +static __always_inline void +elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, + Elf32_Word relative_count) +{ + Elf32_Rel * rpnt = (void *) rel_addr; + --rpnt; + do { + Elf32_Addr *const reloc_addr = (void *) (load_off + (++rpnt)->r_offset); + *reloc_addr += load_off; + } while (--relative_count); +} diff --git a/ldso/ldso/arc/elfinterp.c b/ldso/ldso/arc/elfinterp.c new file mode 100644 index 000000000..2f0cf7f66 --- /dev/null +++ b/ldso/ldso/arc/elfinterp.c @@ -0,0 +1,311 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * Lots of code copied from ../i386/elfinterp.c, so: + * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald, + * David Engel, Hongjiu Lu and Mitch D'Souza + * Copyright (C) 2001-2002, Erik Andersen + * All rights reserved. + * + * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ +#include "ldso.h" + +#ifdef __A7__ +#define ARC_PLT_SIZE 12 +#else +#define ARC_PLT_SIZE 16 +#endif + +unsigned long +_dl_linux_resolver(struct elf_resolve *tpnt, unsigned int plt_pc) +{ + ELF_RELOC *this_reloc, *rel_base; + char *strtab, *symname, *new_addr; + ElfW(Sym) *symtab; + int symtab_index; + unsigned int *got_addr; + unsigned long plt_base; + int plt_idx; + + /* start of .rela.plt */ + rel_base = (ELF_RELOC *)(tpnt->dynamic_info[DT_JMPREL]); + + /* starts of .plt (addr of PLT0) */ + plt_base = tpnt->dynamic_info[DT_PLTGOT]; + + /* + * compute the idx of the yet-unresolved PLT entry in .plt + * Same idx will be used to find the relo entry in .rela.plt + */ + plt_idx = (plt_pc - plt_base)/ARC_PLT_SIZE - 2; /* ignoring 2 dummy PLTs */ + + this_reloc = rel_base + plt_idx; + + symtab_index = ELF_R_SYM(this_reloc->r_info); + symtab = (ElfW(Sym) *)(intptr_t) (tpnt->dynamic_info[DT_SYMTAB]); + strtab = (char *) (tpnt->dynamic_info[DT_STRTAB]); + symname= strtab + symtab[symtab_index].st_name; + + /* relo-offset to fixup, shd be a .got entry */ + got_addr = (unsigned int *)(this_reloc->r_offset + tpnt->loadaddr); + + /* Get the address of the GOT entry */ + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, + ELF_RTYPE_CLASS_PLT, NULL); + + if (unlikely(!new_addr)) { + _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); + _dl_exit(1); + } + + +#if defined __SUPPORT_LD_DEBUG__ + if (_dl_debug_bindings) { + _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname); + if (_dl_debug_detail) + _dl_dprintf(_dl_debug_file, "\n\tpatched %x ==> %pc @ %pl\n", + *got_addr, new_addr, got_addr); + } + + if (!_dl_debug_nofixups) + *got_addr = (unsigned int)new_addr; +#else + /* Update the .got entry with the runtime address of symbol */ + *got_addr = (unsigned int)new_addr; +#endif + + /* + * Return the new addres, where the asm trampoline will jump to + * after re-setting up the orig args + */ + return (unsigned long) new_addr; +} + + +static int +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) +{ + int reloc_type; + int 
symtab_index; + char *symname; + unsigned long *reloc_addr; + unsigned long symbol_addr; +#if defined __SUPPORT_LD_DEBUG__ + unsigned long old_val = 0; +#endif + struct symbol_ref sym_ref; + struct elf_resolve *tls_tpnt = NULL; + + reloc_addr = (unsigned long *)(tpnt->loadaddr + rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); + symbol_addr = 0; + + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; + +#if defined __SUPPORT_LD_DEBUG__ + if (reloc_addr) + old_val = *reloc_addr; +#endif + + if (symtab_index) { + symname = strtab + symtab[symtab_index].st_name; + symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt, + elf_machine_type_class(reloc_type), &sym_ref); + + /* + * We want to allow undefined references to weak symbols, + * this might have been intentional. We should not be linking + * local symbols here, so all bases should be covered. + */ + + if (unlikely(!symbol_addr + && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK + && ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS)) { + /* Non-fatal if called from dlopen, hence different ret code */ + return 1; + } + + tls_tpnt = sym_ref.tpnt; + } else if (reloc_type == R_ARC_RELATIVE ) { + *reloc_addr += tpnt->loadaddr; + goto log_entry; + } + +#if defined USE_TLS && USE_TLS + /* In case of a TLS reloc, tls_tpnt NULL means we have an 'anonymous' + symbol. This is the case for a static tls variable, so the lookup + module is just that one is referencing the tls variable. */ + if (!tls_tpnt) + tls_tpnt = tpnt; +#endif + + switch (reloc_type) { + case R_ARC_NONE: + break; + case R_ARC_32: + *reloc_addr += symbol_addr + rpnt->r_addend; + break; + case R_ARC_PC32: + *reloc_addr += symbol_addr + rpnt->r_addend - (unsigned long) reloc_addr; + break; + case R_ARC_GLOB_DAT: + case R_ARC_JMP_SLOT: + *reloc_addr = symbol_addr; + break; + case R_ARC_COPY: + _dl_memcpy((void *) reloc_addr,(void *) symbol_addr, + symtab[symtab_index].st_size); + break; +#if defined USE_TLS && USE_TLS + case R_ARC_TLS_DTPMOD: + *reloc_addr = tls_tpnt->l_tls_modid; + break; + case R_ARC_TLS_DTPOFF: + *reloc_addr = symbol_addr; + break; + case R_ARC_TLS_TPOFF: + CHECK_STATIC_TLS ((struct link_map *) tls_tpnt); + *reloc_addr = tls_tpnt->l_tls_offset + symbol_addr + rpnt->r_addend; + break; +#endif + default: + return -1; + } + +log_entry: +#if defined __SUPPORT_LD_DEBUG__ + if (_dl_debug_detail) + _dl_dprintf(_dl_debug_file,"\tpatched: %lx ==> %lx @ %pl: addend %x ", + old_val, *reloc_addr, reloc_addr, rpnt->r_addend); +#endif + + return 0; +} + +static int +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt) +{ + int reloc_type; + unsigned long *reloc_addr; +#if defined __SUPPORT_LD_DEBUG__ + unsigned long old_val; +#endif + + reloc_addr = (unsigned long *)(tpnt->loadaddr + rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + +#if defined __SUPPORT_LD_DEBUG__ + old_val = *reloc_addr; +#endif + + switch (reloc_type) { + case R_ARC_NONE: + break; + case R_ARC_JMP_SLOT: + *reloc_addr += tpnt->loadaddr; + break; + default: + return -1; + } + +#if defined __SUPPORT_LD_DEBUG__ + if (_dl_debug_reloc && _dl_debug_detail) + _dl_dprintf(_dl_debug_file, "\tpatched: %lx ==> %lx @ %pl\n", + old_val, *reloc_addr, reloc_addr); +#endif + + return 0; +} + +#define ___DO_LAZY 1 +#define ___DO_NOW 2 + +static int _dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, + unsigned long rel_addr, unsigned long rel_size, int type) +{ + unsigned int i; + char 
*strtab; + ElfW(Sym) *symtab; + ELF_RELOC *rpnt; + int symtab_index; + int res = 0; + + /* Now parse the relocation information */ + rpnt = (ELF_RELOC *)(intptr_t) (rel_addr); + rel_size = rel_size / sizeof(ELF_RELOC); + + symtab = (ElfW(Sym) *)(intptr_t) (tpnt->dynamic_info[DT_SYMTAB]); + strtab = (char *) (tpnt->dynamic_info[DT_STRTAB]); + + for (i = 0; i < rel_size; i++, rpnt++) { + + symtab_index = ELF_R_SYM(rpnt->r_info); + + debug_sym(symtab,strtab,symtab_index); + debug_reloc(symtab,strtab,rpnt); + + /* constant propagation subsumes the 'if' */ + if (type == ___DO_LAZY) + res = _dl_do_lazy_reloc(tpnt, scope, rpnt); + else + res = _dl_do_reloc(tpnt, scope, rpnt, symtab, strtab); + + if (res != 0) + break; + } + + if (unlikely(res != 0)) { + if (res < 0) { + int reloc_type = ELF_R_TYPE(rpnt->r_info); +#if defined __SUPPORT_LD_DEBUG__ + _dl_dprintf(2, "can't handle reloc type %s\n ", + _dl_reltypes(reloc_type)); +#else + _dl_dprintf(2, "can't handle reloc type %x\n", + reloc_type); +#endif + _dl_exit(-res); + } else { + _dl_dprintf(2, "can't resolve symbol\n"); + /* Fall thru to return res */ + } + } + + return res; +} + +void +_dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, + unsigned long rel_addr, + unsigned long rel_size) +{ + /* This func is called for processing .rela.plt of loaded module(s) + * The relo entries handled are JMP_SLOT type for fixing up .got slots + * for external function calls. + * This function doesn't resolve the slots: that is done lazily at + * runtime. The build linker (at least thats what happens for ARC) had + * pre-init the .got slots to point to PLT0. All that is done here is + * to fix them up to point to load value of PLT0 (as opposed to the + * build value). + * On ARC, the loadaddr of dyn exec is zero, thus elfaddr == loadaddr + * Thus there is no point in adding "0" to values and un-necessarily + * stir up the caches and TLB. + * For ldso processing busybox binary, this skips over 380 relo entries + */ + if (rpnt->dyn->loadaddr != 0) + _dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, ___DO_LAZY); +} + +int +_dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, + unsigned long rel_addr, + unsigned long rel_size) +{ + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, ___DO_NOW); +} diff --git a/ldso/ldso/arc/resolve.S b/ldso/ldso/arc/resolve.S new file mode 100644 index 000000000..891f66b97 --- /dev/null +++ b/ldso/ldso/arc/resolve.S @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball. 
+ */ + +#include <sysdep.h> +#include <sys/syscall.h> + +; Save the registers which resolver could possibly clobber +; r0-r9: args to the function - symbol being resolved +; r10-r12 are already clobbered by PLTn, PLT0 thus neednot be saved + +.macro SAVE_CALLER_SAVED + push_s r0 + push_s r1 + push_s r2 + push_s r3 + st.a r4, [sp, -4] + st.a r5, [sp, -4] + st.a r6, [sp, -4] + st.a r7, [sp, -4] + st.a r8, [sp, -4] + st.a r9, [sp, -4] + push_s blink +.endm + +.macro RESTORE_CALLER_SAVED_BUT_R0 + ld.ab blink,[sp, 4] + ld.ab r9, [sp, 4] + ld.ab r8, [sp, 4] + ld.ab r7, [sp, 4] + ld.ab r6, [sp, 4] + ld.ab r5, [sp, 4] + ld.ab r4, [sp, 4] + pop_s r3 + pop_s r2 + pop_s r1 +.endm + +; Upon entry, PLTn, which led us here, sets up the following regs +; r11 = Module info (tpnt pointer as expected by resolver) +; r12 = PC of the PLTn itself - needed by resolver to find +; corresponding .rela.plt entry + +ENTRY(_dl_linux_resolve) + ; args to func being resolved, which resolver might clobber + SAVE_CALLER_SAVED + + mov_s r1, r12 + bl.d _dl_linux_resolver + mov r0, r11 + + RESTORE_CALLER_SAVED_BUT_R0 + j_s.d [r0] ; r0 has resolved function addr + pop_s r0 ; restore first arg to resolved call +END(_dl_linux_resolve) diff --git a/ldso/ldso/arm/aeabi_read_tp.S b/ldso/ldso/arm/aeabi_read_tp.S new file mode 100644 index 000000000..77e0d6ecc --- /dev/null +++ b/ldso/ldso/arm/aeabi_read_tp.S @@ -0,0 +1,62 @@ +/* Copyright (C) 2005 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <features.h> + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ + +#include <sysdep.h> + +/* GCC will emit calls to this routine under -mtp=soft. Linux has an + equivalent helper function (which clobbers fewer registers than + a normal function call) in a high page of memory; tail call to the + helper. + + This function is exported from libc for use by user code. 
libpthread, librt, + and the dynamic linker get their own private copies, for + performance (and in the case of ld.so, out of necessity); those are + all hidden. */ + +#ifndef NOT_IN_libc + .global __aeabi_read_tp +#else + .hidden __aeabi_read_tp +#endif +ENTRY (__aeabi_read_tp) + mov r0, #0xffff0fff + sub pc, r0, #31 +END (__aeabi_read_tp) + +#endif /* __UCLIBC_HAS_THREADS_NATIVE__ */ + diff --git a/ldso/ldso/arm/dl-debug.h b/ldso/ldso/arm/dl-debug.h index d5103202c..af14eca2d 100644 --- a/ldso/ldso/arm/dl-debug.h +++ b/ldso/ldso/arm/dl-debug.h @@ -27,18 +27,20 @@ * SUCH DAMAGE. */ -static const char *_dl_reltypes_tab[] = +static const char * const _dl_reltypes_tab[] = { [0] "R_ARM_NONE", "R_ARM_PC24", "R_ARM_ABS32", "R_ARM_REL32", [4] "R_ARM_PC13", "R_ARM_ABS16", "R_ARM_ABS12", "R_ARM_THM_ABS5", [8] "R_ARM_ABS8", "R_ARM_SBREL32","R_ARM_THM_PC22", "R_ARM_THM_PC8", [12] "R_ARM_AMP_VCALL9", "R_ARM_SWI24", "R_ARM_THM_SWI8", "R_ARM_XPC25", - [16] "R_ARM_THM_XPC22", + [16] "R_ARM_THM_XPC22", "R_ARM_TLS_DTPMOD32", "R_ARM_TLS_DTPOFF32", "R_ARM_TLS_TPOFF32", [20] "R_ARM_COPY", "R_ARM_GLOB_DAT","R_ARM_JUMP_SLOT", "R_ARM_RELATIVE", [24] "R_ARM_GOTOFF", "R_ARM_GOTPC", "R_ARM_GOT32", "R_ARM_PLT32", [32] "R_ARM_ALU_PCREL_7_0","R_ARM_ALU_PCREL_15_8","R_ARM_ALU_PCREL_23_15","R_ARM_LDR_SBREL_11_0", [36] "R_ARM_ALU_SBREL_19_12","R_ARM_ALU_SBREL_27_20", [100] "R_ARM_GNU_VTENTRY","R_ARM_GNU_VTINHERIT","R_ARM_THM_PC11","R_ARM_THM_PC9", + [104] "R_ARM_TLS_GD32","R_ARM_TLS_LDM32","R_ARM_TLS_LDO32","R_ARM_TLS_IE32", + [108] "R_ARM_TLS_LE32","R_ARM_TLS_LDO12","R_ARM_TLS_LE12","R_ARM_TLS_IE12GP", [249] "R_ARM_RXPC25", "R_ARM_RSBREL32", "R_ARM_THM_RPC22", "R_ARM_RREL32", [253] "R_ARM_RABS22", "R_ARM_RPC24", "R_ARM_RBASE", }; diff --git a/ldso/ldso/arm/dl-startup.h b/ldso/ldso/arm/dl-startup.h index 43985d002..eb2a9a22a 100644 --- a/ldso/ldso/arm/dl-startup.h +++ b/ldso/ldso/arm/dl-startup.h @@ -7,12 +7,14 @@ */ #include <features.h> +#include <bits/arm_bx.h> #if !defined(__thumb__) __asm__( " .text\n" " .globl _start\n" " .type _start,%function\n" + " .hidden _start\n" "_start:\n" " @ at start time, all the args are on the stack\n" " mov r0, sp\n" @@ -45,11 +47,7 @@ __asm__( " ldr r0, .L_FINI_PROC\n" " ldr r0, [sl, r0]\n" " @ jump to the user_s entry point\n" -#if defined(__USE_BX__) - " bx r6\n" -#else - " mov pc, r6\n" -#endif + " " __stringify(BX(r6)) "\n" ".L_GET_GOT:\n" " .word _GLOBAL_OFFSET_TABLE_ - .L_GOT_GOT - 4\n" ".L_SKIP_ARGS:\n" @@ -111,11 +109,7 @@ __asm__( " ldr r0, .L_FINI_PROC\n" " ldr r0, [r7, r0]\n" " @ jump to the user_s entry point\n" -#if defined(__USE_BX__) - " bx r6\n" -#else - " mov pc, r6\n" -#endif + " " __stringify(BX(r6)) "\n" "\n\n" ".L_GET_GOT:\n" " .word _GLOBAL_OFFSET_TABLE_ - .L_GOT_GOT - 4\n" @@ -131,16 +125,16 @@ __asm__( /* Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*)ARGS)+1) /* Handle relocation of the symbols in the dynamic loader. 
*/ -static inline +static __always_inline void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, unsigned long symbol_addr, unsigned long load_addr, Elf32_Sym *symtab) { - switch (ELF32_R_TYPE(rpnt->r_info)) { + switch (ELF_R_TYPE(rpnt->r_info)) { case R_ARM_NONE: break; case R_ARM_ABS32: @@ -159,7 +153,7 @@ void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, if (topbits != 0xfe000000 && topbits != 0x00000000) { #if 0 - // Don't bother with this during ldso initilization... + /* Don't bother with this during ldso initilization... */ newvalue = fix_bad_pc24(reloc_addr, symbol_addr) - (unsigned long)reloc_addr + (addend << 2); topbits = newvalue & 0xfe000000; diff --git a/ldso/ldso/arm/dl-syscalls.h b/ldso/ldso/arm/dl-syscalls.h index 1c0e6699e..195461f83 100644 --- a/ldso/ldso/arm/dl-syscalls.h +++ b/ldso/ldso/arm/dl-syscalls.h @@ -1,9 +1,3 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} /* _call_via_rX calls are used in thumb ldso because of calls via * function pointers, but ldso is not linked with anything which * provides them, so define them here (only required for thumb). diff --git a/ldso/ldso/arm/dl-sysdep.h b/ldso/ldso/arm/dl-sysdep.h index 2f9a37f8f..dc89710c6 100644 --- a/ldso/ldso/arm/dl-sysdep.h +++ b/ldso/ldso/arm/dl-sysdep.h @@ -5,6 +5,9 @@ * Copyright (C) 2000-2004 by Erik Andersen <andersen@codepoet.org> */ +#ifndef _ARCH_DL_SYSDEP +#define _ARCH_DL_SYSDEP + /* Define this if the system uses RELOCA. */ #undef ELF_USES_RELOCA #include <elf.h> @@ -15,7 +18,7 @@ GOT_BASE[1] = (unsigned long) MODULE; \ } -static __inline__ unsigned long arm_modulus(unsigned long m, unsigned long p) +static __always_inline unsigned long arm_modulus(unsigned long m, unsigned long p) { unsigned long i,t,inc; i=p; t=0; @@ -55,24 +58,23 @@ static __inline__ unsigned long arm_modulus(unsigned long m, unsigned long p) struct elf_resolve; unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry); -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 +/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or + TLS variable, so undefined references should not be allowed to + define the value. -/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so - PLT entries should not be allowed to define the value. ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one of the main executable's symbols, as for a COPY reloc. */ -#define elf_machine_type_class(type) \ - ((((type) == R_ARM_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ +#define elf_machine_type_class(type) \ + ((((type) == R_ARM_JUMP_SLOT || (type) == R_ARM_TLS_DTPMOD32 \ + || (type) == R_ARM_TLS_DTPOFF32 || (type) == R_ARM_TLS_TPOFF32) \ + * ELF_RTYPE_CLASS_PLT) \ | (((type) == R_ARM_COPY) * ELF_RTYPE_CLASS_COPY)) /* Return the link-time address of _DYNAMIC. Conveniently, this is the first element of the GOT. We used to use the PIC register to do this without a constant pool reference, but GCC 4.2 will use a pseudo-register for the PIC base, so it may not be in r10. 
*/ -static __inline__ Elf32_Addr __attribute__ ((unused)) +static __always_inline Elf32_Addr __attribute__ ((unused)) elf_machine_dynamic (void) { Elf32_Addr dynamic; @@ -103,11 +105,12 @@ elf_machine_dynamic (void) return dynamic; } +extern char __dl_start[] __asm__("_dl_start"); + /* Return the run-time load address of the shared object. */ -static __inline__ Elf32_Addr __attribute__ ((unused)) +static __always_inline Elf32_Addr __attribute__ ((unused)) elf_machine_load_address (void) { - extern void __dl_start __asm__ ("_dl_start"); Elf32_Addr got_addr = (Elf32_Addr) &__dl_start; Elf32_Addr pcrel_addr; #if defined __OPTIMIZE__ && !defined __thumb__ @@ -128,7 +131,7 @@ elf_machine_load_address (void) return pcrel_addr - got_addr; } -static __inline__ void +static __always_inline void elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { @@ -140,3 +143,8 @@ elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, *reloc_addr += load_off; } while (--relative_count); } +#endif /* !_ARCH_DL_SYSDEP */ + +#ifdef __ARM_EABI__ +#define DL_MALLOC_ALIGN 8 /* EABI needs 8 byte alignment for STRD LDRD */ +#endif diff --git a/ldso/ldso/arm/elfinterp.c b/ldso/ldso/arm/elfinterp.c index 37531126a..2043263ec 100644 --- a/ldso/ldso/arm/elfinterp.c +++ b/ldso/ldso/arm/elfinterp.c @@ -44,50 +44,42 @@ extern int _dl_linux_resolve(void); unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; char *symname; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rel_addr; int symtab_index; - char *new_addr; + unsigned long new_addr; char **got_addr; unsigned long instr_addr; rel_addr = (ELF_RELOC *) tpnt->dynamic_info[DT_JMPREL]; this_reloc = rel_addr + reloc_entry; - reloc_type = ELF32_R_TYPE(this_reloc->r_info); - symtab_index = ELF32_R_SYM(this_reloc->r_info); + symtab_index = ELF_R_SYM(this_reloc->r_info); - symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; - if (unlikely(reloc_type != R_ARM_JUMP_SLOT)) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", - _dl_progname); - _dl_exit(1); - } - /* Address of jump instruction to fix up */ instr_addr = ((unsigned long) this_reloc->r_offset + (unsigned long) tpnt->loadaddr); got_addr = (char **) instr_addr; /* Get the address of the GOT entry */ - new_addr = _dl_find_hash(symname, tpnt->symbol_scope, - tpnt, ELF_RTYPE_CLASS_PLT); + new_addr = (unsigned long)_dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, + tpnt, ELF_RTYPE_CLASS_PLT, NULL); if (unlikely(!new_addr)) { _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); } #if defined (__SUPPORT_LD_DEBUG__) -#if !defined __SUPPORT_LD_DEBUG_EARLY__ +# if !defined __SUPPORT_LD_DEBUG_EARLY__ if ((unsigned long) got_addr < 0x40000000) -#endif +# endif { if (_dl_debug_bindings) { @@ -97,25 +89,25 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) } } if (!_dl_debug_nofixups) { - *got_addr = new_addr; + *got_addr = (char *)new_addr; } #else - *got_addr = new_addr; + *got_addr = (char *)new_addr; #endif - return (unsigned long) new_addr; + return new_addr; } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - 
int (*reloc_fnc) (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)) + int (*reloc_fnc) (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { int i; char *strtab; int goof = 0; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rpnt; int symtab_index; @@ -123,13 +115,13 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, rpnt = (ELF_RELOC *) rel_addr; rel_size = rel_size / sizeof(ELF_RELOC); - symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; for (i = 0; i < rel_size; i++, rpnt++) { int res; - symtab_index = ELF32_R_SYM(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); debug_sym(symtab,strtab,symtab_index); debug_reloc(symtab,strtab,rpnt); @@ -145,7 +137,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, if (unlikely(res <0)) { - int reloc_type = ELF32_R_TYPE(rpnt->r_info); + int reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) _dl_dprintf(2, "can't handle reloc type %s\n ", _dl_reltypes(reloc_type)); #else @@ -189,35 +181,55 @@ fix_bad_pc24 (unsigned long *const reloc_addr, unsigned long value) #endif static int -_dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_reloc (struct elf_resolve *tpnt,struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; + char *symname; unsigned long *reloc_addr; unsigned long symbol_addr; + struct symbol_ref sym_ref; + struct elf_resolve *def_mod = 0; int goof = 0; reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); - symtab_index = ELF32_R_SYM(rpnt->r_info); + + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); symbol_addr = 0; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; + symname = strtab + symtab[symtab_index].st_name; if (symtab_index) { - - symbol_addr = (unsigned long) _dl_find_hash(strtab + symtab[symtab_index].st_name, - scope, tpnt, elf_machine_type_class(reloc_type)); + symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt, + elf_machine_type_class(reloc_type), &sym_ref); /* * We want to allow undefined references to weak symbols - this might * have been intentional. We should not be linking local symbols * here, so all bases should be covered. */ - if (!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { - _dl_dprintf (2, "%s: can't resolve symbol '%s'\n", - _dl_progname, strtab + symtab[symtab_index].st_name); - _dl_exit (1); + if (!symbol_addr && (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS) + && (ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) { + /* This may be non-fatal if called from dlopen. */ + return 1; + + } + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); } + def_mod = sym_ref.tpnt; + } else { + /* + * Relocs against STN_UNDEF are usually treated as using a + * symbol value of zero, and using the module containing the + * reloc itself. 
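+		 * This matters mainly for the TLS cases below: e.g. an
+		 * R_ARM_TLS_DTPMOD32 against symbol index 0 must yield the
+		 * module id of the object containing the reloc, which is why
+		 * def_mod falls back to tpnt here.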
+ */ + symbol_addr = symtab[symtab_index].st_value; + def_mod = tpnt; } #if defined (__SUPPORT_LD_DEBUG__) @@ -273,6 +285,20 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, _dl_memcpy((void *) reloc_addr, (void *) symbol_addr, symtab[symtab_index].st_size); break; +#if defined USE_TLS && USE_TLS + case R_ARM_TLS_DTPMOD32: + *reloc_addr = def_mod->l_tls_modid; + break; + + case R_ARM_TLS_DTPOFF32: + *reloc_addr += symbol_addr; + break; + + case R_ARM_TLS_TPOFF32: + CHECK_STATIC_TLS ((struct link_map *) def_mod); + *reloc_addr += (symbol_addr + def_mod->l_tls_offset); + break; +#endif default: return -1; /*call _dl_exit(1) */ } @@ -287,14 +313,14 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, } static int -_dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_lazy_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; unsigned long *reloc_addr; reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); + reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) { @@ -326,8 +352,8 @@ void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, } int _dl_parse_relocation_information(struct dyn_elf *rpnt, - unsigned long rel_addr, unsigned long rel_size) + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/arm/resolve.S b/ldso/ldso/arm/resolve.S index b422c334d..7e0058e0d 100644 --- a/ldso/ldso/arm/resolve.S +++ b/ldso/ldso/arm/resolve.S @@ -90,14 +90,17 @@ * dl-startup.c). */ -#include <sys/syscall.h> +#include <features.h> #include <bits/arm_asm.h> +#include <bits/arm_bx.h> -#include <features.h> +#define sl r10 +#define fp r11 +#define ip r12 .text .align 4 @ 16 byte boundary and there are 32 bytes below (arm case) - #if !defined(__thumb__) || defined(__thumb2__) +#if 1 /*(!defined(__thumb__) || defined __THUMB_INTERWORK__) || defined(__thumb2__)*/ .arm .globl _dl_linux_resolve .type _dl_linux_resolve,%function @@ -109,8 +112,8 @@ _dl_linux_resolve: @ function must branch to the real function, and that expects @ r0-r3 and lr to be as they were before the whole PLT stuff - @ ip can be trashed. - @ This routine is called after pushing lr, so we must push an odd - @ number of words to keep the stack correctly aligned. + @ This routine is called after pushing lr, so we must push an odd + @ number of words to keep the stack correctly aligned. stmdb sp!, {r0, r1, r2, r3, r4} ldr r0, [lr, #-4] @ r0 : = [lr-4] (GOT_TABLE[1]) @@ -119,16 +122,12 @@ _dl_linux_resolve: @ ~x = -x-1, therefore ~(r1>>2) = (-((lr-ip)>>2)-1) @ = - ((lr-ip)/4) - 1 = (ip - lr - 4)/4, as required - bl _dl_linux_resolver + bl _dl_linux_resolver - mov ip, r0 + mov ip, r0 ldmia sp!, {r0, r1, r2, r3, r4, lr} -#if defined(__USE_BX__) - bx ip -#else - mov pc,ip -#endif + BX(ip) #else @ In the thumb case _dl_linux_resolver is thumb. If a bl is used @ from arm code the linker will insert a stub call which, with diff --git a/ldso/ldso/arm/thumb_atomics.S b/ldso/ldso/arm/thumb_atomics.S new file mode 100644 index 000000000..f88da2b9c --- /dev/null +++ b/ldso/ldso/arm/thumb_atomics.S @@ -0,0 +1,78 @@ +/* Copyright (C) 2006 Free Software Foundation, Inc. 
+ This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <features.h> + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ + +#include <sysdep.h> + +#if defined __thumb__ + +/* Out-of-line atomic operations that we can't do in Thumb mode. + This ends up in various libraries where it is needed (and + a few .a archives where it isn't). */ + + .hidden __thumb_swpb +ENTRY (__thumb_swpb) + swpb r0, r0, [r1] + bx lr +END (__thumb_swpb) + + .hidden __thumb_swp +ENTRY (__thumb_swp) + swp r0, r0, [r1] + bx lr +END (__thumb_swp) + + .hidden __thumb_cmpxchg +ENTRY (__thumb_cmpxchg) + stmdb sp!, {r4, lr} + mov r4, r0 +0: ldr r3, [r2] + cmp r3, r4 + bne 1f + mov r0, r4 + mov r3, #0xffff0fff + mov lr, pc + add pc, r3, #(0xffff0fc0 - 0xffff0fff) + bcc 0b + mov r3, r4 +1: mov r0, r3 + ldmia sp!, {r4, pc} +END (__thumb_cmpxchg) + +#endif /* __thumb__ */ +#endif /* __UCLIBC_HAS_THREADS_NATIVE__ */ + diff --git a/ldso/ldso/avr32/dl-debug.h b/ldso/ldso/avr32/dl-debug.h index fe35539f6..44b0c01da 100644 --- a/ldso/ldso/avr32/dl-debug.h +++ b/ldso/ldso/avr32/dl-debug.h @@ -26,7 +26,7 @@ * SUCH DAMAGE. */ -static const char *_dl_reltypes_tab[] = { +static const char * const _dl_reltypes_tab[] = { "R_AVR32_NONE", "R_AVR32_32", "R_AVR32_16", "R_AVR32_8", "R_AVR32_32_PCREL", "R_AVR32_16_PCREL", "R_AVR32_8_PCREL", diff --git a/ldso/ldso/avr32/dl-startup.h b/ldso/ldso/avr32/dl-startup.h index 3b8bf4ce2..e49101955 100644 --- a/ldso/ldso/avr32/dl-startup.h +++ b/ldso/ldso/avr32/dl-startup.h @@ -12,6 +12,7 @@ __asm__(" .text\n" " .global _start\n" " .type _start,@function\n" + " .hidden _start\n" "_start:\n" /* All arguments are on the stack initially */ " mov r12, sp\n" @@ -41,7 +42,7 @@ __asm__(" .text\n" " .previous\n"); /* Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. 
*/ #define GET_ARGV(ARGVP, ARGS) ARGVP = ((unsigned long *)ARGS + 1) @@ -75,7 +76,7 @@ void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, unsigned long symbol_addr, unsigned long load_addr, Elf32_Sym *symtab) { - switch(ELF32_R_TYPE(rpnt->r_info)) { + switch(ELF_R_TYPE(rpnt->r_info)) { case R_AVR32_NONE: break; case R_AVR32_GLOB_DAT: @@ -91,7 +92,7 @@ void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, break; default: SEND_STDERR("BOOTSTRAP_RELOC: unhandled reloc_type "); - SEND_NUMBER_STDERR(ELF32_R_TYPE(rpnt->r_info), 1); + SEND_NUMBER_STDERR(ELF_R_TYPE(rpnt->r_info), 1); SEND_STDERR("REL, SYMBOL, LOAD: "); SEND_ADDRESS_STDERR(reloc_addr, 0); SEND_STDERR(", "); @@ -101,12 +102,3 @@ void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, _dl_exit(1); } } - -/* Transfer control to the user's application, once the dynamic loader - * is done. This routine has to exit the current function, then call - * the _dl_elf_main function. - * - * Since our _dl_boot will simply call whatever is returned by - * _dl_boot2, we can just return the address we're supposed to - * call. */ -#define START() return _dl_elf_main; diff --git a/ldso/ldso/avr32/dl-syscalls.h b/ldso/ldso/avr32/dl-syscalls.h index 996bb87c6..f40c4fd31 100644 --- a/ldso/ldso/avr32/dl-syscalls.h +++ b/ldso/ldso/avr32/dl-syscalls.h @@ -1,6 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/avr32/dl-sysdep.h b/ldso/ldso/avr32/dl-sysdep.h index 5ee110101..a42212731 100644 --- a/ldso/ldso/avr32/dl-sysdep.h +++ b/ldso/ldso/avr32/dl-sysdep.h @@ -24,19 +24,17 @@ /* Initialization sequence for the application/library GOT. */ #define INIT_GOT(GOT_BASE,MODULE) \ do { \ - unsigned long i, nr_got; \ + unsigned long _i, _nr_got; \ \ GOT_BASE[0] = (unsigned long) _dl_linux_resolve; \ GOT_BASE[1] = (unsigned long) MODULE; \ \ /* Add load address displacement to all GOT entries */ \ - nr_got = MODULE->dynamic_info[DT_AVR32_GOTSZ_IDX] / 4; \ - for (i = 2; i < nr_got; i++) \ - GOT_BASE[i] += (unsigned long)MODULE->loadaddr; \ + _nr_got = MODULE->dynamic_info[DT_AVR32_GOTSZ_IDX] / 4; \ + for (_i = 2; _i < _nr_got; _i++) \ + GOT_BASE[_i] += (unsigned long)MODULE->loadaddr; \ } while (0) -#define do_rem(result, n, base) ((result) = (n) % (base)) - /* Here we define the magic numbers that this dynamic loader should accept */ #define MAGIC1 EM_AVR32 #undef MAGIC2 @@ -44,12 +42,10 @@ /* Used for error messages */ #define ELF_TARGET "AVR32" -unsigned long _dl_linux_resolver(unsigned long got_offset, unsigned long *got); +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 +unsigned long _dl_linux_resolver(unsigned long got_offset, unsigned long *got); #define elf_machine_type_class(type) \ ((type == R_AVR32_JMP_SLOT) * ELF_RTYPE_CLASS_PLT) @@ -60,18 +56,19 @@ unsigned long _dl_linux_resolver(unsigned long got_offset, unsigned long *got); /* Return the link-time address of _DYNAMIC. Conveniently, this is the first element of the GOT. This must be inlined in a function which uses global data. 
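   On AVR32 the GOT pointer is kept in r6 by convention, so dereferencing
   it (as the function below does) yields GOT[0], i.e. the link-time
   address of _DYNAMIC.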
*/ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_dynamic (void) { register Elf32_Addr *got __asm__("r6"); return *got; } +extern char __dl_start[] __asm__("_dl_start"); + /* Return the run-time load address of the shared object. */ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_load_address (void) { - extern void __dl_start __asm__("_dl_start"); Elf32_Addr got_addr = (Elf32_Addr) &__dl_start; Elf32_Addr pcrel_addr; @@ -91,7 +88,7 @@ elf_machine_load_address (void) * Currently, we don't use that tag, but we might in the future as * this would reduce the startup time somewhat (although probably not by much). */ -static __inline__ void +static __always_inline void elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { diff --git a/ldso/ldso/avr32/elfinterp.c b/ldso/ldso/avr32/elfinterp.c index 813179e64..17f34fae8 100644 --- a/ldso/ldso/avr32/elfinterp.c +++ b/ldso/ldso/avr32/elfinterp.c @@ -34,7 +34,7 @@ unsigned long _dl_linux_resolver(unsigned long got_offset, unsigned long *got) */ #if 0 struct elf_resolve *tpnt = (struct elf_resolve *)got[1]; - Elf32_Sym *sym; + ElfW(Sym) *sym; unsigned long local_gotno; unsigned long gotsym; unsigned long new_addr; @@ -45,14 +45,14 @@ unsigned long _dl_linux_resolver(unsigned long got_offset, unsigned long *got) local_gotno = tpnt->dynamic_info[DT_AVR32_LOCAL_GOTNO]; gotsym = tpnt->dynamic_info[DT_AVR32_GOTSYM]; - sym = ((Elf32_Sym *)(tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr)) + sym = ((ElfW(Sym) *)(tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr)) + sym_index; strtab = (char *)(tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr); symname = strtab + sym->st_name; - new_addr = (unsigned long) _dl_find_hash(strtab + sym->st_name, - tpnt->symbol_scope, tpnt, - resolver); + new_addr = (unsigned long) _dl_find_hash(symname, + &_dl_loaded_modules->symbol_scope, tpnt, + ELF_RTYPE_CLASS_PLT, NULL); entry = (unsigned long *)(got + local_gotno + sym_index - gotsym); *entry = new_addr; @@ -63,25 +63,25 @@ unsigned long _dl_linux_resolver(unsigned long got_offset, unsigned long *got) } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_func)(struct elf_resolve *tpnt, struct dyn_elf *scope, - Elf32_Rela *rpnt, Elf32_Sym *symtab, char *strtab)) + int (*reloc_func)(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ElfW(Rela) *rpnt, ElfW(Sym) *symtab, char *strtab)) { - Elf32_Sym *symtab; - Elf32_Rela *rpnt; + ElfW(Sym) *symtab; + ElfW(Rela) *rpnt; char *strtab; int i; - rpnt = (Elf32_Rela *)rel_addr; - rel_size /= sizeof(Elf32_Rela); - symtab = (Elf32_Sym *)tpnt->dynamic_info[DT_SYMTAB]; + rpnt = (ElfW(Rela) *)rel_addr; + rel_size /= sizeof(ElfW(Rela)); + symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; for (i = 0; i < rel_size; i++, rpnt++) { int symtab_index, res; - symtab_index = ELF32_R_SYM(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); debug_sym(symtab, strtab, symtab_index); debug_reloc(symtab, strtab, rpnt); @@ -98,7 +98,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, strtab + symtab[symtab_index].st_name); if (res < 0) { - int reloc_type = ELF32_R_TYPE(rpnt->r_info); + int reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined(__SUPPORT_LD_DEBUG__) _dl_dprintf(2, "can't handle reloc type %s\n", _dl_reltypes(reloc_type)); @@ -116,8 
+116,8 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, return 0; } -static int _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, - Elf32_Rela *rpnt, Elf32_Sym *symtab, char *strtab) +static int _dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ElfW(Rela) *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; @@ -127,26 +127,32 @@ static int _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, #if defined(__SUPPORT_LD_DEBUG__) unsigned long old_val; #endif + struct symbol_ref sym_ref; reloc_addr = (unsigned long *)(tpnt->loadaddr + rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); - symtab_index = ELF32_R_SYM(rpnt->r_info); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); symbol_addr = 0; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symname = strtab + symtab[symtab_index].st_name; if (symtab_index) { symbol_addr = (unsigned long) - _dl_find_hash(strtab + symtab[symtab_index].st_name, - tpnt->symbol_scope, tpnt, - elf_machine_type_class(reloc_type)); + _dl_find_hash(symname, scope, tpnt, + elf_machine_type_class(reloc_type), &sym_ref); /* Allow undefined references to weak symbols */ if (!symbol_addr && - ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { + ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); return 0; } + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } } #if defined(__SUPPORT_LD_DEBUG__) @@ -185,9 +191,10 @@ void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, } int _dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/bfin/dl-debug.h b/ldso/ldso/bfin/dl-debug.h index 9dd316240..6952b6160 100644 --- a/ldso/ldso/bfin/dl-debug.h +++ b/ldso/ldso/bfin/dl-debug.h @@ -29,18 +29,18 @@ * SUCH DAMAGE. 
*/ -static const char *_dl_reltypes_tab[] = +static const char * const _dl_reltypes_tab[] = { - [0] "R_BFIN_unused0", "R_BFIN_pcrel5m2", - [2] "R_BFIN_unused1", "R_BFIN_pcrel10", - [4] "R_BFIN_pcrel12_jump", "R_BFIN_rimm16", - [6] "R_BFIN_luimm16", "R_BFIN_huimm16", - [8] "R_BFIN_pcrel12_jump_s","R_BFIN_pcrel24_jump_x", - [10] "R_BFIN_pcrel24", "R_BFIN_unusedb", - [12] "R_BFIN_unusedc", "R_BFIN_pcrel24_jump_l", - [14] "R_BFIN_pcrel24_call_x","R_BFIN_var_eq_symb", - [16] "R_BFIN_byte_data", "R_BFIN_byte2_data", "R_BFIN_byte4_data", - [19] "R_BFIN_pcrel11", + [0] "R_BFIN_UNUSED0", "R_BFIN_PCREL5M2", + [2] "R_BFIN_UNUSED1", "R_BFIN_PCREL10", + [4] "R_BFIN_PCREL12_JUMP", "R_BFIN_RIMM16", + [6] "R_BFIN_LUIMM16", "R_BFIN_HUIMM16", + [8] "R_BFIN_PCREL12_JUMP_S","R_BFIN_PCREL24_JUMP_X", + [10] "R_BFIN_PCREL24", "R_BFIN_UNUSEDB", + [12] "R_BFIN_UNUSEDC", "R_BFIN_PCREL24_JUMP_L", + [14] "R_BFIN_PCREL24_CALL_X","R_BFIN_var_eq_symb", + [16] "R_BFIN_BYTE_DATA", "R_BFIN_BYTE2_DATA", "R_BFIN_BYTE4_DATA", + [19] "R_BFIN_PCREL11", [20] "R_BFIN_GOT17M4", "R_BFIN_GOTHI", "R_BFIN_GOTLO", [23] "R_BFIN_FUNCDESC", diff --git a/ldso/ldso/bfin/dl-inlines.h b/ldso/ldso/bfin/dl-inlines.h index f8b8f85f1..b08ce61cb 100644 --- a/ldso/ldso/bfin/dl-inlines.h +++ b/ldso/ldso/bfin/dl-inlines.h @@ -1,574 +1,146 @@ - /* Copyright (C) 2003, 2004 Red Hat, Inc. - Contributed by Alexandre Oliva <aoliva@redhat.com> - -This file is part of uClibc. - -uClibc is free software; you can redistribute it and/or modify it -under the terms of the GNU Lesser General Public License as -published by the Free Software Foundation; either version 2.1 of the -License, or (at your option) any later version. - -uClibc is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -Library General Public License for more details. - -You should have received a copy of the GNU Lesser General Public -License along with uClibc; see the file COPYING.LIB. If not, write to -the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, -USA. */ +/* Copyright (C) 2003, 2004 Red Hat, Inc. + * Contributed by Alexandre Oliva <aoliva@redhat.com> + * Copyright (C) 2006-2011 Analog Devices, Inc. + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ #include <bfin_sram.h> -#ifndef _dl_assert -# define _dl_assert(expr) -#endif - -/* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete - load map. */ -inline static void -__dl_init_loadaddr_map (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Addr dl_boot_got_pointer, - struct elf32_fdpic_loadmap *map) -{ - if (map->version != 0) - { - SEND_EARLY_STDERR ("Invalid loadmap version number\n"); - _dl_exit(-1); - } - if (map->nsegs == 0) - { - SEND_EARLY_STDERR ("Invalid segment count in loadmap\n"); - _dl_exit(-1); - } - loadaddr->got_value = dl_boot_got_pointer; - loadaddr->map = map; -} - -/* Figure out how many LOAD segments there are in the given headers, - and allocate a block for the load map big enough for them. - got_value will be properly initialized later on, with INIT_GOT. 
*/ -inline static int -__dl_init_loadaddr (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt, - int pcnt) -{ - int count = 0, i; - size_t size; - - for (i = 0; i < pcnt; i++) - if (ppnt[i].p_type == PT_LOAD) - count++; - - loadaddr->got_value = 0; - - size = sizeof (struct elf32_fdpic_loadmap) - + sizeof (struct elf32_fdpic_loadseg) * count; - loadaddr->map = _dl_malloc (size); - if (! loadaddr->map) - _dl_exit (-1); - - loadaddr->map->version = 0; - loadaddr->map->nsegs = 0; - - return count; -} - -/* Incrementally initialize a load map. */ -inline static void -__dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, - Elf32_Phdr *phdr, int maxsegs) -{ - struct elf32_fdpic_loadseg *segdata; - - if (loadaddr.map->nsegs == maxsegs) - _dl_exit (-1); - - segdata = &loadaddr.map->segs[loadaddr.map->nsegs++]; - segdata->addr = (Elf32_Addr) addr; - segdata->p_vaddr = phdr->p_vaddr; - segdata->p_memsz = phdr->p_memsz; - -#if defined (__SUPPORT_LD_DEBUG__) - { - extern char *_dl_debug; - extern int _dl_debug_file; - if (_dl_debug) - _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", - loadaddr.map->nsegs-1, - segdata->p_vaddr, segdata->addr, segdata->p_memsz); - } -#endif -} - -inline static void __dl_loadaddr_unmap -(struct elf32_fdpic_loadaddr loadaddr, struct funcdesc_ht *funcdesc_ht); - -/* Figure out whether the given address is in one of the mapped - segments. */ -inline static int -__dl_addr_in_loadaddr (void *p, struct elf32_fdpic_loadaddr loadaddr) -{ - struct elf32_fdpic_loadmap *map = loadaddr.map; - int c; - - for (c = 0; c < map->nsegs; c++) - if ((void*)map->segs[c].addr <= p - && (char*)p < (char*)map->segs[c].addr + map->segs[c].p_memsz) - return 1; - - return 0; -} - -inline static void * _dl_funcdesc_for (void *entry_point, void *got_value); - -/* The hashcode handling code below is heavily inspired in libiberty's - hashtab code, but with most adaptation points and support for - deleting elements removed. - - Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. - Contributed by Vladimir Makarov (vmakarov@cygnus.com). */ - -inline static unsigned long -higher_prime_number (unsigned long n) -{ - /* These are primes that are near, but slightly smaller than, a - power of two. */ - static const unsigned long primes[] = { - (unsigned long) 7, - (unsigned long) 13, - (unsigned long) 31, - (unsigned long) 61, - (unsigned long) 127, - (unsigned long) 251, - (unsigned long) 509, - (unsigned long) 1021, - (unsigned long) 2039, - (unsigned long) 4093, - (unsigned long) 8191, - (unsigned long) 16381, - (unsigned long) 32749, - (unsigned long) 65521, - (unsigned long) 131071, - (unsigned long) 262139, - (unsigned long) 524287, - (unsigned long) 1048573, - (unsigned long) 2097143, - (unsigned long) 4194301, - (unsigned long) 8388593, - (unsigned long) 16777213, - (unsigned long) 33554393, - (unsigned long) 67108859, - (unsigned long) 134217689, - (unsigned long) 268435399, - (unsigned long) 536870909, - (unsigned long) 1073741789, - (unsigned long) 2147483647, - /* 4294967291L */ - ((unsigned long) 2147483647) + ((unsigned long) 2147483644), - }; - - const unsigned long *low = &primes[0]; - const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])]; - - while (low != high) - { - const unsigned long *mid = low + (high - low) / 2; - if (n > *mid) - low = mid + 1; - else - high = mid; - } - -#if 0 - /* If we've run out of primes, abort. 
*/ - if (n > *low) - { - fprintf (stderr, "Cannot find prime bigger than %lu\n", n); - abort (); - } -#endif - - return *low; -} - -struct funcdesc_ht -{ - /* Table itself. */ - struct funcdesc_value **entries; - - /* Current size (in entries) of the hash table */ - size_t size; - - /* Current number of elements. */ - size_t n_elements; -}; - -inline static int -hash_pointer (const void *p) -{ - return (int) ((long)p >> 3); -} - -inline static struct funcdesc_ht * -htab_create (void) -{ - struct funcdesc_ht *ht = _dl_malloc (sizeof (struct funcdesc_ht)); - - if (! ht) - return NULL; - ht->size = 3; - ht->entries = _dl_malloc (sizeof (struct funcdesc_ht_value *) * ht->size); - if (! ht->entries) - return NULL; - - ht->n_elements = 0; - - _dl_memset (ht->entries, 0, sizeof (struct funcdesc_ht_value *) * ht->size); - - return ht; -} - -/* This is only called from _dl_loadaddr_unmap, so it's safe to call - _dl_free(). See the discussion below. */ -inline static void -htab_delete (struct funcdesc_ht *htab) -{ - int i; - - for (i = htab->size - 1; i >= 0; i--) - if (htab->entries[i]) - _dl_free (htab->entries[i]); - - _dl_free (htab->entries); - _dl_free (htab); -} - -/* Similar to htab_find_slot, but without several unwanted side effects: - - Does not call htab->eq_f when it finds an existing entry. - - Does not change the count of elements/searches/collisions in the - hash table. - This function also assumes there are no deleted entries in the table. - HASH is the hash value for the element to be inserted. */ - -inline static struct funcdesc_value ** -find_empty_slot_for_expand (struct funcdesc_ht *htab, int hash) -{ - size_t size = htab->size; - unsigned int index = hash % size; - struct funcdesc_value **slot = htab->entries + index; - int hash2; - - if (! *slot) - return slot; - - hash2 = 1 + hash % (size - 2); - for (;;) - { - index += hash2; - if (index >= size) - index -= size; - - slot = htab->entries + index; - if (! *slot) - return slot; - } -} - -/* The following function changes size of memory allocated for the - entries and repeatedly inserts the table elements. The occupancy - of the table after the call will be about 50%. Naturally the hash - table must already exist. Remember also that the place of the - table entries is changed. If memory allocation failures are allowed, - this function will return zero, indicating that the table could not be - expanded. If all goes well, it will return a non-zero value. */ - -inline static int -htab_expand (struct funcdesc_ht *htab) -{ - struct funcdesc_value **oentries; - struct funcdesc_value **olimit; - struct funcdesc_value **p; - struct funcdesc_value **nentries; - size_t nsize; - - oentries = htab->entries; - olimit = oentries + htab->size; - - /* Resize only when table after removal of unused elements is either - too full or too empty. 
*/ - if (htab->n_elements * 2 > htab->size) - nsize = higher_prime_number (htab->n_elements * 2); - else - nsize = htab->size; - - nentries = _dl_malloc (sizeof (struct funcdesc_value *) * nsize); - _dl_memset (nentries, 0, sizeof (struct funcdesc_value *) * nsize); - if (nentries == NULL) - return 0; - htab->entries = nentries; - htab->size = nsize; - - p = oentries; - do - { - if (*p) - *find_empty_slot_for_expand (htab, hash_pointer ((*p)->entry_point)) - = *p; - - p++; - } - while (p < olimit); - -#if 0 /* We can't tell whether this was allocated by the _dl_malloc() - built into ld.so or malloc() in the main executable or libc, - and calling free() for something that wasn't malloc()ed could - do Very Bad Things (TM). Take the conservative approach - here, potentially wasting as much memory as actually used by - the hash table, even if multiple growths occur. That's not - so bad as to require some overengineered solution that would - enable us to keep track of how it was allocated. */ - _dl_free (oentries); -#endif - return 1; -} - -/* This function searches for a hash table slot containing an entry - equal to the given element. To delete an entry, call this with - INSERT = 0, then call htab_clear_slot on the slot returned (possibly - after doing some checks). To insert an entry, call this with - INSERT = 1, then write the value you want into the returned slot. - When inserting an entry, NULL may be returned if memory allocation - fails. */ - -inline static struct funcdesc_value ** -htab_find_slot (struct funcdesc_ht *htab, void *ptr, int insert) -{ - unsigned int index; - int hash, hash2; - size_t size; - struct funcdesc_value **entry; - - if (htab->size * 3 <= htab->n_elements * 4 - && htab_expand (htab) == 0) - return NULL; - - hash = hash_pointer (ptr); - - size = htab->size; - index = hash % size; - - entry = &htab->entries[index]; - if (!*entry) - goto empty_entry; - else if ((*entry)->entry_point == ptr) - return entry; - - hash2 = 1 + hash % (size - 2); - for (;;) - { - index += hash2; - if (index >= size) - index -= size; - - entry = &htab->entries[index]; - if (!*entry) - goto empty_entry; - else if ((*entry)->entry_point == ptr) - return entry; - } - - empty_entry: - if (!insert) - return NULL; - - htab->n_elements++; - return entry; -} - -void * -_dl_funcdesc_for (void *entry_point, void *got_value) -{ - struct elf_resolve *tpnt = ((void**)got_value)[2]; - struct funcdesc_ht *ht = tpnt->funcdesc_ht; - struct funcdesc_value **entry; - - _dl_assert (got_value == tpnt->loadaddr.got_value); - - if (! ht) - { - ht = htab_create (); - if (! ht) - return (void*)-1; - tpnt->funcdesc_ht = ht; - } - - entry = htab_find_slot (ht, entry_point, 1); - if (*entry) - { - _dl_assert ((*entry)->entry_point == entry_point); - return _dl_stabilize_funcdesc (*entry); - } - - *entry = _dl_malloc (sizeof (struct funcdesc_value)); - (*entry)->entry_point = entry_point; - (*entry)->got_value = got_value; - - return _dl_stabilize_funcdesc (*entry); +#define __dl_loadaddr_unmap __dl_loadaddr_unmap + +#include "../fdpic/dl-inlines.h" + +static __always_inline void +__dl_loadaddr_unmap(struct elf32_fdpic_loadaddr loadaddr, + struct funcdesc_ht *funcdesc_ht) +{ + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) { + struct elf32_fdpic_loadseg *segdata; + ssize_t offs; + segdata = loadaddr.map->segs + i; + + /* FIXME: + * A more cleaner way is to add type for struct elf32_fdpic_loadseg, + * and release the memory according to the type. + * Currently, we hardcode the memory address of L1 SRAM. 
+ */ + if ((segdata->addr & 0xff800000) == 0xff800000) { + _dl_sram_free((void *)segdata->addr); + continue; + } + + offs = (segdata->p_vaddr & ADDR_ALIGN); + _dl_munmap((void*)segdata->addr - offs, + segdata->p_memsz + offs); + } + + /* + * _dl_unmap is only called for dlopen()ed libraries, for which + * calling free() is safe, or before we've completed the initial + * relocation, in which case calling free() is probably pointless, + * but still safe. + */ + _dl_free(loadaddr.map); + if (funcdesc_ht) + htab_delete(funcdesc_ht); +} + +static __always_inline int +__dl_is_special_segment(Elf32_Ehdr *epnt, Elf32_Phdr *ppnt) +{ + if (ppnt->p_type != PT_LOAD) + return 0; + + /* Allow read-only executable segments to be loaded into L1 inst */ + if ((epnt->e_flags & EF_BFIN_CODE_IN_L1) && + !(ppnt->p_flags & PF_W) && (ppnt->p_flags & PF_X)) + return 1; + + /* Allow writable non-executable segments to be loaded into L1 data */ + if ((epnt->e_flags & EF_BFIN_DATA_IN_L1) && + (ppnt->p_flags & PF_W) && !(ppnt->p_flags & PF_X)) + return 1; + + /* + * These L1 memory addresses are also used in GNU ld and linux kernel. + * They need to be kept synchronized. + */ + switch (ppnt->p_vaddr) { + case 0xff700000: + case 0xff800000: + case 0xff900000: + case 0xffa00000: + case 0xfeb00000: + case 0xfec00000: + return 1; + default: + return 0; + } } -inline static void const * -_dl_lookup_address (void const *address) +static __always_inline char * +__dl_map_segment(Elf32_Ehdr *epnt, Elf32_Phdr *ppnt, int infile, int flags) { - struct elf_resolve *rpnt; - struct funcdesc_value const *fd; - - /* Make sure we don't make assumptions about its alignment. */ - __asm__ ("" : "+r" (address)); - - if ((Elf32_Addr)address & 7) - /* It's not a function descriptor. */ - return address; - - fd = (struct funcdesc_value const *)address; - - for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next) - { - if (! rpnt->funcdesc_ht) - continue; - - if (fd->got_value != rpnt->loadaddr.got_value) - continue; + void *addr; + unsigned long sram_flags = 0; - address = htab_find_slot (rpnt->funcdesc_ht, (void*)fd->entry_point, 0); - - if (address && *(struct funcdesc_value *const*)address == fd) + /* Handle L1 inst mappings */ + if (((epnt->e_flags & EF_BFIN_CODE_IN_L1) || ppnt->p_vaddr == 0xffa00000) && + !(ppnt->p_flags & PF_W) && (ppnt->p_flags & PF_X)) { - address = (*(struct funcdesc_value *const*)address)->entry_point; - break; + size_t size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz; + void *status = _dl_mmap(NULL, size, LXFLAGS(ppnt->p_flags), + flags | MAP_EXECUTABLE | MAP_DENYWRITE, + infile, ppnt->p_offset & OFFS_ALIGN); + if (_dl_mmap_check_error(status)) + return NULL; + + addr = _dl_sram_alloc(ppnt->p_filesz, L1_INST_SRAM); + if (addr) + _dl_dma_memcpy(addr, status + (ppnt->p_vaddr & ADDR_ALIGN), ppnt->p_filesz); + else + _dl_dprintf(2, "%s:%s: sram allocation %#x failed\n", + _dl_progname, __func__, ppnt->p_vaddr); + + _dl_munmap(status, size); + return addr; } - else - address = fd; - } - - return address; -} - -void -__dl_loadaddr_unmap (struct elf32_fdpic_loadaddr loadaddr, - struct funcdesc_ht *funcdesc_ht) -{ - int i; - - for (i = 0; i < loadaddr.map->nsegs; i++) - { - struct elf32_fdpic_loadseg *segdata; - ssize_t offs; - segdata = loadaddr.map->segs + i; - - /* FIXME: - A more cleaner way is to add type for struct elf32_fdpic_loadseg, - and release the memory according to the type. - Currently, we hardcode the memory address of L1 SRAM. 
*/ - if ((segdata->addr & 0xff800000) == 0xff800000) - { - _dl_sram_free ((void *)segdata->addr); - continue; - } - offs = (segdata->p_vaddr & ADDR_ALIGN); - _dl_munmap ((void*)segdata->addr - offs, - segdata->p_memsz + offs); - } - /* _dl_unmap is only called for dlopen()ed libraries, for which - calling free() is safe, or before we've completed the initial - relocation, in which case calling free() is probably pointless, - but still safe. */ - _dl_free (loadaddr.map); - if (funcdesc_ht) - htab_delete (funcdesc_ht); -} - -inline static int -__dl_is_special_segment (Elf32_Ehdr *epnt, - Elf32_Phdr *ppnt) -{ - if (ppnt->p_type != PT_LOAD) - return 0; - - if ((epnt->e_flags & EF_BFIN_CODE_IN_L1) - && !(ppnt->p_flags & PF_W) - && (ppnt->p_flags & PF_X)) - return 1; - - if ((epnt->e_flags & EF_BFIN_DATA_IN_L1) - && (ppnt->p_flags & PF_W) - && !(ppnt->p_flags & PF_X)) - return 1; - - /* 0xff700000, 0xff800000, 0xff900000 and 0xffa00000 are also used in - GNU ld and linux kernel. They need to be keep synchronized. */ - if (ppnt->p_vaddr == 0xff700000 - || ppnt->p_vaddr == 0xff800000 - || ppnt->p_vaddr == 0xff900000 - || ppnt->p_vaddr == 0xffa00000) - return 1; - - return 0; -} - -inline static char * -__dl_map_segment (Elf32_Ehdr *epnt, - Elf32_Phdr *ppnt, - int infile, - int flags) -{ - char *status, *tryaddr, *l1addr; - size_t size; - - if (((epnt->e_flags & EF_BFIN_CODE_IN_L1) || ppnt->p_vaddr == 0xffa00000) - && !(ppnt->p_flags & PF_W) - && (ppnt->p_flags & PF_X)) { - status = (char *) _dl_mmap - (tryaddr = 0, - size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz, - LXFLAGS(ppnt->p_flags), - flags | MAP_EXECUTABLE | MAP_DENYWRITE, - infile, ppnt->p_offset & OFFS_ALIGN); - if (_dl_mmap_check_error(status) - || (tryaddr && tryaddr != status)) - return NULL; - l1addr = (char *) _dl_sram_alloc (ppnt->p_filesz, L1_INST_SRAM); - if (l1addr != NULL) - _dl_dma_memcpy (l1addr, status + (ppnt->p_vaddr & ADDR_ALIGN), ppnt->p_filesz); - _dl_munmap (status, size); - if (l1addr == NULL) - _dl_dprintf(2, "%s:%i: L1 allocation failed\n", _dl_progname, __LINE__); - return l1addr; - } + /* Handle L1 data mappings */ + if (((epnt->e_flags & EF_BFIN_DATA_IN_L1) || + ppnt->p_vaddr == 0xff700000 || + ppnt->p_vaddr == 0xff800000 || + ppnt->p_vaddr == 0xff900000) && + (ppnt->p_flags & PF_W) && !(ppnt->p_flags & PF_X)) + { + switch (ppnt->p_vaddr) { + case 0xff800000: sram_flags = L1_DATA_A_SRAM; break; + case 0xff900000: sram_flags = L1_DATA_B_SRAM; break; + default: sram_flags = L1_DATA_SRAM; break; + } + } - if (((epnt->e_flags & EF_BFIN_DATA_IN_L1) - || ppnt->p_vaddr == 0xff700000 - || ppnt->p_vaddr == 0xff800000 - || ppnt->p_vaddr == 0xff900000) - && (ppnt->p_flags & PF_W) - && !(ppnt->p_flags & PF_X)) { - if (ppnt->p_vaddr == 0xff800000) - l1addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_A_SRAM); - else if (ppnt->p_vaddr == 0xff900000) - l1addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_B_SRAM); - else - l1addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_SRAM); - if (l1addr == NULL) { - _dl_dprintf(2, "%s:%i: L1 allocation failed\n", _dl_progname, __LINE__); - } else { - if (_DL_PREAD (infile, l1addr, ppnt->p_filesz, ppnt->p_offset) != ppnt->p_filesz) { - _dl_sram_free (l1addr); - return NULL; - } - if (ppnt->p_filesz < ppnt->p_memsz) - _dl_memset (l1addr + ppnt->p_filesz, 0, ppnt->p_memsz - ppnt->p_filesz); - } - return l1addr; - } + /* Handle L2 mappings */ + if (ppnt->p_vaddr == 0xfeb00000 || ppnt->p_vaddr == 0xfec00000) + sram_flags = L2_SRAM; + + if (sram_flags) { + addr = 
_dl_sram_alloc(ppnt->p_memsz, sram_flags); + if (addr) { + if (_DL_PREAD(infile, addr, ppnt->p_filesz, ppnt->p_offset) != ppnt->p_filesz) { + _dl_sram_free(addr); + return NULL; + } + if (ppnt->p_filesz < ppnt->p_memsz) + _dl_memset(addr + ppnt->p_filesz, 0, ppnt->p_memsz - ppnt->p_filesz); + } else + _dl_dprintf(2, "%s:%s: sram allocation %#x failed\n", + _dl_progname, __func__, ppnt->p_vaddr); + return addr; + } - return 0; + return 0; } diff --git a/ldso/ldso/bfin/dl-startup.h b/ldso/ldso/bfin/dl-startup.h index a1e150e27..576b8f29f 100644 --- a/ldso/ldso/bfin/dl-startup.h +++ b/ldso/ldso/bfin/dl-startup.h @@ -14,17 +14,13 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Lesser General Public -License along with uClibc; see the file COPYING.LIB. If not, write to -the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, -USA. */ +License along with uClibc; see the file COPYING.LIB. If not, see +<http://www.gnu.org/licenses/>. */ /* Any assembly language/system dependent hacks needed to setup * boot1.c so it will work as expected and cope with whatever platform * specific wierdness is needed for this architecture. - - * We override the default _dl_boot function, and replace it with a - * bit of asm. Then call the real _dl_boot function, which is now - * named _dl_boot2. */ + */ /* At program start-up, p0 contains a pointer to a elf32_fdpic_loadmap that describes how the executable was loaded @@ -40,50 +36,51 @@ USA. */ use this value to initialize the PIC register. */ __asm__( - " .text\n" \ - " .global __dl_boot\n" \ - " .type __dl_boot,@function\n" \ - "__dl_boot:\n" \ - " call .Lcall\n" \ - ".Lcall:\n" \ - " R4 = RETS;\n" \ - " SP += -32;\n" \ - " R5 = P0;\n" \ - " R6 = P1;\n" \ - " R7 = P2;\n" \ - " R0.L = .Lcall;\n" \ - " R0.H = .Lcall;\n" \ - " R1.L = __ROFIXUP_LIST__;\n" \ - " R1.H = __ROFIXUP_LIST__;\n" \ - " R2.L = __ROFIXUP_END__;\n" \ - " R2.H = __ROFIXUP_END__;\n" \ - " R1 = R1 - R0;\n" \ - " R1 = R1 + R4;\n" \ - " R2 = R2 - R0;\n" \ - " R2 = R2 + R4;\n" \ - " R0 = P1;\n" \ - " CC = R0 == 0;\n" \ - " IF CC R0 = P0;\n" \ - " CALL ___self_reloc;\n" \ - " P3 = R0;\n" \ - " P5 = R0;\n" \ - " R1 = R5;\n" \ - " R2 = R6;\n" \ - " [SP + 12] = R7;\n" \ - " P0 = SP;\n" \ - " P0 += 24;\n" \ - " [SP + 16] = P0;\n" \ - " P0 += 8;\n" \ - " [SP + 20] = P0;\n" \ - " CALL __dl_start;\n" \ - " /* Pass our FINI ptr() to the user in P1 */\n" \ - " R7 = [P5 + __dl_fini@FUNCDESC_GOT17M4];\n" \ - " P4 = [SP + 24];\n" \ - " P3 = [SP + 28];\n" \ - " P0 = R5;\n" \ - " SP += 32;\n" \ - " JUMP (P4);\n" \ - " .size __dl_boot,.-__dl_boot\n" + " .text\n" + " .global __start\n" + " .type __start,@function\n" + " .hidden __start\n" + "__start:\n" + " call .Lcall\n" + ".Lcall:\n" + " R4 = RETS;\n" + " SP += -32;\n" + " R5 = P0;\n" + " R6 = P1;\n" + " R7 = P2;\n" + " R0.L = .Lcall;\n" + " R0.H = .Lcall;\n" + " R1.L = __ROFIXUP_LIST__;\n" + " R1.H = __ROFIXUP_LIST__;\n" + " R2.L = __ROFIXUP_END__;\n" + " R2.H = __ROFIXUP_END__;\n" + " R1 = R1 - R0;\n" + " R1 = R1 + R4;\n" + " R2 = R2 - R0;\n" + " R2 = R2 + R4;\n" + " R0 = P1;\n" + " CC = R0 == 0;\n" + " IF CC R0 = P0;\n" + " CALL ___self_reloc;\n" + " P3 = R0;\n" + " P5 = R0;\n" + " R1 = R5;\n" + " R2 = R6;\n" + " [SP + 12] = R7;\n" + " P0 = SP;\n" + " P0 += 24;\n" + " [SP + 16] = P0;\n" + " P0 += 8;\n" + " [SP + 20] = P0;\n" + " CALL __dl_start;\n" + " /* Pass our FINI ptr() to the user in P1 */\n" + " R7 = [P5 + 
__dl_fini@FUNCDESC_GOT17M4];\n" + " P4 = [SP + 24];\n" + " P3 = [SP + 28];\n" + " P0 = R5;\n" + " SP += 32;\n" + " JUMP (P4);\n" + " .size __start,.-__start\n" ); #undef DL_START @@ -96,11 +93,9 @@ _dl_start (Elf32_Addr dl_boot_got_pointer, \ struct funcdesc_value *dl_main_funcdesc, \ X) -struct elf32_fdpic_loadmap; - /* * Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*) ARGS) + 1) @@ -113,8 +108,8 @@ struct elf32_fdpic_loadmap; * load address. */ #define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \ - switch(ELF32_R_TYPE((RELP)->r_info)){ \ - case R_BFIN_byte4_data: \ + switch(ELF_R_TYPE((RELP)->r_info)){ \ + case R_BFIN_BYTE4_DATA: \ *(REL) += (SYMBOL); \ break; \ case R_BFIN_FUNCDESC_VALUE: \ diff --git a/ldso/ldso/bfin/dl-syscalls.h b/ldso/ldso/bfin/dl-syscalls.h index 21e4cdd2c..bfe352001 100644 --- a/ldso/ldso/bfin/dl-syscalls.h +++ b/ldso/ldso/bfin/dl-syscalls.h @@ -14,195 +14,24 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Lesser General Public -License along with uClibc; see the file COPYING.LIB. If not, write to -the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, -USA. */ - -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} -#include <sys/mman.h> - -/* The code below is extracted from libc/sysdeps/linux/frv/_mmap.c */ - -#if DYNAMIC_LOADER_IN_SIMULATOR -#define __NR___syscall_mmap2 __NR_mmap2 -static __inline__ _syscall6(__ptr_t, __syscall_mmap2, __ptr_t, addr, - size_t, len, int, prot, int, flags, int, fd, off_t, offset); - -/* Make sure we don't get another definition of _dl_mmap from the - machine-independent code. */ -#undef __NR_mmap -#undef __NR_mmap2 - -/* This is always 12, even on architectures where PAGE_SHIFT != 12. */ -# ifndef MMAP2_PAGE_SHIFT -# define MMAP2_PAGE_SHIFT 12 -# endif - -#include <bits/uClibc_page.h> /* for PAGE_SIZE */ -inline static void *_dl_memset(void*,int,size_t); -inline static ssize_t _dl_pread(int fd, void *buf, size_t count, off_t offset); - -static __ptr_t -_dl_mmap(__ptr_t addr, size_t len, int prot, int flags, int fd, __off_t offset) -{ - size_t plen = (len + PAGE_SIZE - 1) & -PAGE_SIZE; - -/* This is a hack to enable the dynamic loader to run within a - simulator that doesn't support mmap, with a number of very ugly - tricks. Also, it's not as useful as it sounds, since only dynamic - executables without DT_NEEDED dependencies can be run. AFAIK, they - can only be created with -pie. This trick suffices to enable the - dynamic loader to obtain a blank page that it maps early in the - bootstrap. */ - if ((flags & MAP_FIXED) == 0) - { - void *_dl_mmap_base = 0; - __ptr_t *ret = 0; - - if (! 
_dl_mmap_base) - { - void *stack; - __asm__ ("mov sp, %0" : "=r" (stack)); - _dl_mmap_base = (void *)(((long)stack + 2 * PAGE_SIZE) & -PAGE_SIZE); - retry: - if (((void **)_dl_mmap_base)[0] == _dl_mmap_base - && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base - && (((void **)_dl_mmap_base)[177] - == ((void **)_dl_mmap_base)[771])) - { - while (((void**)_dl_mmap_base)[177]) - { - _dl_mmap_base = ((void**)_dl_mmap_base)[177]; - if (!(((void **)_dl_mmap_base)[0] == _dl_mmap_base - && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base - && (((void **)_dl_mmap_base)[177] - == ((void**)_dl_mmap_base)[771]))) - ((void(*)())0)(); - } - } - else - { - int i; - for (i = 0; i < (int)PAGE_SIZE; i++) - if (*(char*)(_dl_mmap_base + i)) - break; - if (i != PAGE_SIZE) - { - _dl_mmap_base = (void*)((long)_dl_mmap_base + PAGE_SIZE); - goto retry; - } - ((void**)_dl_mmap_base)[-1] = - ((void**)_dl_mmap_base)[0] = - ((void**)_dl_mmap_base)[1023] = - _dl_mmap_base; - } - } - - if (_dl_mmap_base) - { - if (!(((void **)_dl_mmap_base)[0] == _dl_mmap_base - && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base - && (((void **)_dl_mmap_base)[177] - == ((void**)_dl_mmap_base)[771]))) - ((void(*)())0)(); - ret = (__ptr_t)((char*)_dl_mmap_base + PAGE_SIZE); - _dl_mmap_base = - ((void**)_dl_mmap_base)[177] = - ((void**)_dl_mmap_base)[771] = - (char*)_dl_mmap_base + plen + PAGE_SIZE; - ((void**)_dl_mmap_base)[0] = - ((void**)_dl_mmap_base)[1023] = - _dl_mmap_base; - } - - if ((flags & MAP_ANONYMOUS) != 0) - { - _dl_memset (ret, 0, plen); - return ret; - } - - flags |= MAP_FIXED; - addr = ret; - } - if (offset & ((1 << MMAP2_PAGE_SHIFT) - 1)) { -#if 0 - __set_errno (EINVAL); -#endif - return MAP_FAILED; - } - if ((flags & MAP_FIXED) != 0) - { - if (_dl_pread(fd, addr, len, offset) != (ssize_t)len) - return (void*)MAP_FAILED; - if (plen != len) - _dl_memset (addr + len, 0, plen - len); - return addr; - } - return(__syscall_mmap2(addr, len, prot, flags, fd, (off_t) (offset >> MMAP2_PAGE_SHIFT))); -} -#endif - -#ifdef __NR_pread -#ifdef DYNAMIC_LOADER_IN_SIMULATOR -#include <unistd.h> - -#define __NR___syscall_lseek __NR_lseek -inline static unsigned long _dl_read(int fd, const void *buf, unsigned long count); - -inline static _syscall3(__off_t, __syscall_lseek, int, fd, __off_t, offset, - int, whence); -inline static ssize_t -_dl_pread(int fd, void *buf, size_t count, off_t offset) -{ - __off_t orig = __syscall_lseek (fd, 0, SEEK_CUR); - ssize_t ret; - - if (orig == -1) - return -1; - - if (__syscall_lseek (fd, offset, SEEK_SET) != offset) - return -1; - - ret = _dl_read (fd, buf, count); - - if (__syscall_lseek (fd, orig, SEEK_SET) != orig) - ((void(*)())0)(); - - return ret; -} -#else -#define __NR___syscall_pread __NR_pread -inline static _syscall5(ssize_t, __syscall_pread, int, fd, void *, buf, - size_t, count, off_t, offset_hi, off_t, offset_lo); - -inline static ssize_t -_dl_pread(int fd, void *buf, size_t count, off_t offset) -{ - return(__syscall_pread(fd,buf,count,__LONG_LONG_PAIR (offset >> 31, offset))); -} -#endif -#endif +License along with uClibc; see the file COPYING.LIB. If not, see +<http://www.gnu.org/licenses/>. 
*/ #ifdef __NR_sram_alloc #define __NR__dl_sram_alloc __NR_sram_alloc -inline static _syscall2(__ptr_t, _dl_sram_alloc, - size_t, len, unsigned long, flags); +static __always_inline _syscall2(__ptr_t, _dl_sram_alloc, + size_t, len, unsigned long, flags) #endif #ifdef __NR_sram_free #define __NR__dl_sram_free __NR_sram_free -inline static _syscall1(int, _dl_sram_free, __ptr_t, addr); +static __always_inline _syscall1(int, _dl_sram_free, __ptr_t, addr) #endif #ifdef __NR_dma_memcpy #define __NR__dl_dma_memcpy __NR_dma_memcpy -inline static _syscall3(__ptr_t, _dl_dma_memcpy, - __ptr_t, dest, __ptr_t, src, size_t, len); +static __always_inline _syscall3(__ptr_t, _dl_dma_memcpy, + __ptr_t, dest, __ptr_t, src, size_t, len) #endif #define __UCLIBC_MMAP_HAS_6_ARGS__ diff --git a/ldso/ldso/bfin/dl-sysdep.h b/ldso/ldso/bfin/dl-sysdep.h index 52df4c91f..5758117ba 100644 --- a/ldso/ldso/bfin/dl-sysdep.h +++ b/ldso/ldso/bfin/dl-sysdep.h @@ -1,23 +1,10 @@ - /* Copyright (C) 2003, 2004 Red Hat, Inc. - Contributed by Alexandre Oliva <aoliva@redhat.com> - Based on ../i386/dl-sysdep.h - -This file is part of uClibc. - -uClibc is free software; you can redistribute it and/or modify it -under the terms of the GNU Lesser General Public License as -published by the Free Software Foundation; either version 2.1 of the -License, or (at your option) any later version. - -uClibc is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -Library General Public License for more details. - -You should have received a copy of the GNU Lesser General Public -License along with uClibc; see the file COPYING.LIB. If not, write to -the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, -USA. */ +/* Copyright (C) 2003, 2004 Red Hat, Inc. + * Contributed by Alexandre Oliva <aoliva@redhat.com> + * Copyright (C) 2006-2011 Analog Devices, Inc. + * Based on ../i386/dl-sysdep.h + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ /* * Various assembly language/system dependent hacks that are required @@ -34,21 +21,6 @@ USA. */ #define DL_NO_COPY_RELOCS -#define HAVE_DL_INLINES_H - -/* - * Initialization sequence for a GOT. Copy the resolver function - * descriptor and the pointer to the elf_resolve/link_map data - * structure. Initialize the got_value in the module while at that. - */ -#define INIT_GOT(GOT_BASE,MODULE) \ -{ \ - (MODULE)->loadaddr.got_value = (GOT_BASE); \ - GOT_BASE[0] = ((unsigned long *)&_dl_linux_resolve)[0]; \ - GOT_BASE[1] = ((unsigned long *)&_dl_linux_resolve)[1]; \ - GOT_BASE[2] = (unsigned long) MODULE; \ -} - /* Here we define the magic numbers that this dynamic loader should accept */ #define MAGIC1 EM_BLACKFIN #undef MAGIC2 @@ -56,25 +28,11 @@ USA. */ /* Used for error messages */ #define ELF_TARGET "BFIN" -struct elf_resolve; - -struct funcdesc_value -{ - void *entry_point; - void *got_value; -} __attribute__((__aligned__(8))); - +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS extern int _dl_linux_resolve(void) __attribute__((__visibility__("hidden"))); -/* 4KiB page alignment. Should perhaps be made dynamic using - getpagesize(), based on AT_PAGESZ from auxvt? 
*/ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 - -struct funcdesc_ht; - #undef SEND_EARLY_STDERR #define SEND_EARLY_STDERR(S) \ do { \ @@ -92,63 +50,6 @@ struct funcdesc_ht; for (__t = 0; __t < 0x1000000; __t++) __asm__ __volatile__ (""); } \ } while (0) -#define DL_LOADADDR_TYPE struct elf32_fdpic_loadaddr - -#define DL_RELOC_ADDR(LOADADDR, ADDR) \ - ((ElfW(Addr))__reloc_pointer ((void*)(ADDR), (LOADADDR).map)) - -#define DL_ADDR_TO_FUNC_PTR(ADDR, LOADADDR) \ - ((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value)) - -#define _dl_stabilize_funcdesc(val) \ - ({ __asm__ ("" : "+m" (*(val))); (val); }) - -#define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) \ - ({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \ - void (*pf)(void) = (void*) _dl_stabilize_funcdesc (&fd); \ - (* SIGNATURE pf)(__VA_ARGS__); }) - -#define DL_INIT_LOADADDR_BOOT(LOADADDR, BASEADDR) \ - (__dl_init_loadaddr_map (&(LOADADDR), dl_boot_got_pointer, \ - dl_boot_ldsomap ?: dl_boot_progmap)) - -#define DL_INIT_LOADADDR_PROG(LOADADDR, BASEADDR) \ - (__dl_init_loadaddr_map (&(LOADADDR), 0, dl_boot_progmap)) - -#define DL_INIT_LOADADDR_EXTRA_DECLS \ - int dl_init_loadaddr_load_count; -#define DL_INIT_LOADADDR(LOADADDR, BASEADDR, PHDR, PHDRCNT) \ - (dl_init_loadaddr_load_count = \ - __dl_init_loadaddr (&(LOADADDR), (PHDR), (PHDRCNT))) -#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ - (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ - dl_init_loadaddr_load_count)) -#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ - (__dl_loadaddr_unmap ((LOADADDR), (NULL))) -#define DL_LIB_UNMAP(LIB, LEN) \ - (__dl_loadaddr_unmap ((LIB)->loadaddr, (LIB)->funcdesc_ht)) -#define DL_LOADADDR_BASE(LOADADDR) \ - ((LOADADDR).got_value) - -/* This is called from dladdr(), such that we map a function - descriptor's address to the function's entry point before trying to - find in which library it's defined. */ -#define DL_LOOKUP_ADDRESS(ADDRESS) (_dl_lookup_address (ADDRESS)) - -#define DL_ADDR_IN_LOADADDR(ADDR, TPNT, TFROM) \ - (! (TFROM) && __dl_addr_in_loadaddr ((void*)(ADDR), (TPNT)->loadaddr)) - -/* - * Compute the GOT address. On several platforms, we use assembly - * here. on FR-V FDPIC, there's no way to compute the GOT address, - * since the offset between text and data is not fixed, so we arrange - * for the assembly _dl_boot to pass this value as an argument to - * _dl_boot. */ -#define DL_BOOT_COMPUTE_GOT(got) ((got) = dl_boot_got_pointer) - -#define DL_BOOT_COMPUTE_DYN(dpnt, got, load_addr) \ - ((dpnt) = dl_boot_ldso_dyn_pointer) - /* We only support loading FDPIC independently-relocatable shared libraries. It probably wouldn't be too hard to support loading shared libraries that require relocation by the same amount, but we @@ -173,57 +74,25 @@ do \ } \ while (0) -/* We want want to apply all relocations in the interpreter during - bootstrap. Because of this, we have to skip the interpreter - relocations in _dl_parse_relocation_information(), see - elfinterp.c. */ -#define DL_SKIP_BOOTSTRAP_RELOC(SYMTAB, INDEX, STRTAB) 0 - -#ifdef __NR_pread -#define _DL_PREAD(FD, BUF, SIZE, OFFSET) \ - (_dl_pread((FD), (BUF), (SIZE), (OFFSET))) -#endif - -/* We want to return to dlsym() a function descriptor if the symbol - turns out to be a function. */ -#define DL_FIND_HASH_VALUE(TPNT, TYPE_CLASS, SYM) \ - (((TYPE_CLASS) & ELF_RTYPE_CLASS_DLSYM) \ - && ELF32_ST_TYPE((SYM)->st_info) == STT_FUNC \ - ? 
_dl_funcdesc_for ((void *)DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value), \ - (TPNT)->loadaddr.got_value) \ - : DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value)) - #define DL_IS_SPECIAL_SEGMENT(EPNT, PPNT) \ __dl_is_special_segment(EPNT, PPNT) #define DL_MAP_SEGMENT(EPNT, PPNT, INFILE, FLAGS) \ __dl_map_segment (EPNT, PPNT, INFILE, FLAGS) -#define DL_GET_READY_TO_RUN_EXTRA_PARMS \ - , struct elf32_fdpic_loadmap *dl_boot_progmap, Elf32_Addr dl_boot_got_pointer -#define DL_GET_READY_TO_RUN_EXTRA_ARGS \ - , dl_boot_progmap, dl_boot_got_pointer - +#if defined(__BFIN_FDPIC__) +#include "../fdpic/dl-sysdep.h" -#ifdef __USE_GNU -# include <link.h> -#else -# define __USE_GNU -# include <link.h> -# undef __USE_GNU -#endif +static __always_inline Elf32_Addr +elf_machine_load_address (void) +{ + /* this is never an issue on Blackfin systems, so screw it */ + return 0; +} -#include <elf.h> -static __inline__ void +static __always_inline void elf_machine_relative (DL_LOADADDR_TYPE load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { -#if 0 - Elf32_Rel * rpnt = (void *) rel_addr; - --rpnt; - do { - Elf32_Addr *const reloc_addr = (void *) (load_off + (++rpnt)->r_offset); - - *reloc_addr = DL_RELOC_ADDR (load_off, *reloc_addr); - } while (--relative_count); -#endif + return 0; } +#endif diff --git a/ldso/ldso/bfin/elfinterp.c b/ldso/ldso/bfin/elfinterp.c index ac24337a1..4e1c1c75f 100644 --- a/ldso/ldso/bfin/elfinterp.c +++ b/ldso/ldso/bfin/elfinterp.c @@ -20,9 +20,8 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Lesser General Public -License along with uClibc; see the file COPYING.LIB. If not, write to -the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, -USA. */ +License along with uClibc; see the file COPYING.LIB. If not, see +<http://www.gnu.org/licenses/>. */ #include <sys/cdefs.h> /* __attribute_used__ */ @@ -37,44 +36,39 @@ USA. */ a more than adequate job of explaining everything required to get this working. */ -struct funcdesc_value volatile *__attribute__((__visibility__("hidden"))) +__attribute__((__visibility__("hidden"))) +struct funcdesc_value volatile * _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; ElfW(Sym) *symtab; int symtab_index; char *rel_addr; - struct elf_resolve *new_tpnt; char *new_addr; struct funcdesc_value funcval; struct funcdesc_value volatile *got_entry; char *symname; + struct symbol_ref sym_ref; rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); - reloc_type = ELF_R_TYPE(this_reloc->r_info); symtab_index = ELF_R_SYM(this_reloc->r_info); - symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symname= strtab + symtab[symtab_index].st_name; - if (reloc_type != R_BFIN_FUNCDESC_VALUE) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", - _dl_progname); - _dl_exit(1); - } - /* Address of GOT entry fix up */ got_entry = (struct funcdesc_value *) DL_RELOC_ADDR(tpnt->loadaddr, this_reloc->r_offset); /* Get the address to be used to fill in the GOT entry. 
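A brief aside on the FUNCDESC machinery referenced above: under FDPIC, the value dlsym() or the lazy resolver hands out for a function is not a code address but an 8-byte-aligned descriptor pairing the entry point with the GOT value of the defining module, which is what DL_FIND_HASH_VALUE builds via _dl_funcdesc_for and what _dl_linux_resolver stores back into the PLT's GOT slot. A minimal, hypothetical illustration (the field names mirror struct funcdesc_value; nothing else here is loader code):

/* Hypothetical stand-alone illustration of an FDPIC function descriptor. */
#include <stdio.h>

struct fdpic_funcdesc {
	void *entry_point;	/* code address of the callee */
	void *got_value;	/* GOT/PIC register value of the callee's module */
} __attribute__((__aligned__(8)));

static void hello(void) { puts("hello"); }

int main(void)
{
	unsigned long fake_got[3] = { 0 };	/* stand-in for a module's GOT */
	struct fdpic_funcdesc fd = { (void *)hello, fake_got };

	/* A real call sequence loads fd.got_value into the PIC register and
	 * jumps to fd.entry_point; plain C can only do the jump part. */
	((void (*)(void)) fd.entry_point)();
	printf("descriptor: entry %p, got %p\n", fd.entry_point, fd.got_value);
	return 0;
}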
*/ - new_addr = _dl_lookup_hash(symname, tpnt->symbol_scope, NULL, 0, &new_tpnt); + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, NULL, 0, &sym_ref); if (!new_addr) { - new_addr = _dl_lookup_hash(symname, NULL, NULL, 0, &new_tpnt); + new_addr = _dl_find_hash(symname, NULL, NULL, 0, &sym_ref); if (!new_addr) { _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); @@ -83,7 +77,7 @@ _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) } funcval.entry_point = new_addr; - funcval.got_value = new_tpnt->loadaddr.got_value; + funcval.got_value = sym_ref.tpnt->loadaddr.got_value; #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_bindings) { @@ -106,9 +100,9 @@ _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc) (struct elf_resolve *tpnt, struct dyn_elf *scope, + int (*reloc_fnc) (struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; @@ -157,7 +151,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, +_dl_do_reloc (struct elf_resolve *tpnt,struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; @@ -172,12 +166,15 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, #if defined (__SUPPORT_LD_DEBUG__) unsigned long old_val; #endif + struct symbol_ref sym_ref; reloc_addr = (unsigned long *) DL_RELOC_ADDR(tpnt->loadaddr, rpnt->r_offset); __asm__ ("" : "=r" (reloc_addr_packed) : "0" (reloc_addr)); reloc_type = ELF_R_TYPE(rpnt->r_info); symtab_index = ELF_R_SYM(rpnt->r_info); symbol_addr = 0; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symname = strtab + symtab[symtab_index].st_name; if (ELF_ST_BIND (symtab[symtab_index].st_info) == STB_LOCAL) { @@ -186,7 +183,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, } else { symbol_addr = (unsigned long) - _dl_lookup_hash(symname, scope, NULL, 0, &symbol_tpnt); + _dl_find_hash(symname, scope, NULL, 0, &sym_ref); /* * We want to allow undefined references to weak symbols - this might @@ -196,9 +193,14 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, if (!symbol_addr && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { _dl_dprintf (2, "%s: can't resolve symbol '%s'\n", - _dl_progname, strtab + symtab[symtab_index].st_name); + _dl_progname, symname); _dl_exit (1); } + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } + symbol_tpnt = sym_ref.tpnt; } #if defined (__SUPPORT_LD_DEBUG__) @@ -213,9 +215,9 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, old_val = 0; #endif switch (reloc_type) { - case R_BFIN_unused0: + case R_BFIN_UNUSED0: break; - case R_BFIN_byte4_data: + case R_BFIN_BYTE4_DATA: if ((long)reloc_addr_packed & 3) reloc_value = reloc_addr_packed->v += symbol_addr; else @@ -282,7 +284,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, static int _dl_do_lazy_reloc (struct elf_resolve *tpnt, - struct dyn_elf *scope __attribute__((unused)), + struct r_scope_elem *scope __attribute__((unused)), ELF_RELOC *rpnt, ElfW(Sym) *symtab __attribute__((unused)), char *strtab __attribute__((unused))) { @@ -300,11 +302,11 @@ 
_dl_do_lazy_reloc (struct elf_resolve *tpnt, old_val = (unsigned long)reloc_addr->entry_point; #endif switch (reloc_type) { - case R_BFIN_unused0: + case R_BFIN_UNUSED0: break; case R_BFIN_FUNCDESC_VALUE: funcval = *reloc_addr; - funcval.entry_point = DL_RELOC_ADDR(tpnt->loadaddr, funcval.entry_point); + funcval.entry_point = (void *) DL_RELOC_ADDR(tpnt->loadaddr, funcval.entry_point); funcval.got_value = tpnt->loadaddr.got_value; *reloc_addr = funcval; break; @@ -313,7 +315,7 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, } #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val, reloc_addr->entry_point, reloc_addr); + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, reloc_addr->entry_point, reloc_addr); #endif return 0; @@ -328,9 +330,9 @@ _dl_parse_lazy_relocation_information int _dl_parse_relocation_information -(struct dyn_elf *rpnt, unsigned long rel_addr, unsigned long rel_size) +(struct dyn_elf *rpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } /* We don't have copy relocs. */ diff --git a/ldso/ldso/bfin/resolve.S b/ldso/ldso/bfin/resolve.S index ae7f4a4c5..4ac12c36c 100644 --- a/ldso/ldso/bfin/resolve.S +++ b/ldso/ldso/bfin/resolve.S @@ -14,9 +14,8 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Lesser General Public -License along with uClibc; see the file COPYING.LIB. If not, write to -the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, -USA. */ +License along with uClibc; see the file COPYING.LIB. If not, see +<http://www.gnu.org/licenses/>. */ /* The function below is tail-called by resolver stubs when a lazily-bound function is called. It must preserve all diff --git a/ldso/ldso/c6x/dl-debug.h b/ldso/ldso/c6x/dl-debug.h new file mode 100644 index 000000000..d4915bf21 --- /dev/null +++ b/ldso/ldso/c6x/dl-debug.h @@ -0,0 +1,49 @@ +/* C6X DSBT ELF shared library loader suppport. + * + * Copyright (C) 2010 Texas Instruments Incorporated + * Contributed by Mark Salter <msalter@redhat.com> + * + * All rights reserved. + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. 
+ */ + +static const char * const _dl_reltypes_tab[] = +{ + "R_C6000_NONE", /* 0 */ + "R_C6000_ABS32", + "R_C6000_ABS16", + "R_C6000_ABS8", + "R_C6000_PCR_S21", + "R_C6000_PCR_S12", /* 5 */ + "R_C6000_PCR_S10", + "R_C6000_PCR_S7", + "R_C6000_ABS_S16", + "R_C6000_ABS_L16", + "R_C6000_ABS_H16", /* 10 */ + "R_C6000_SBR_U15_B", + "R_C6000_SBR_U15_H", + "R_C6000_SBR_U15_W", + "R_C6000_SBR_S16", + "R_C6000_SBR_L16_B", /* 15 */ + "R_C6000_SBR_L16_H", + "R_C6000_SBR_L16_W", + "R_C6000_SBR_H16_B", + "R_C6000_SBR_H16_H", + "R_C6000_SBR_H16_W", /* 20 */ + "R_C6000_SBR_GOT_U15_W", + "R_C6000_SBR_GOT_L16_W", + "R_C6000_SBR_GOT_H16_W", + "R_C6000_DSBT_INDEX", + "R_C6000_PREL31", /* 25 */ + "R_C6000_COPY", + "R_C6000_JUMP_SLOT", + "R_C6000_SBR_GOT32", + "R_C6000_PCR_H16", + "R_C6000_PCR_L16", /* 30 */ +#if 0 + "R_C6000_ALIGN", /* 253 */ + "R_C6000_FPHEAD", /* 254 */ + "R_C6000_NOCMP", /* 255 */ +#endif +}; diff --git a/ldso/ldso/c6x/dl-inlines.h b/ldso/ldso/c6x/dl-inlines.h new file mode 100644 index 000000000..62e1cc9ca --- /dev/null +++ b/ldso/ldso/c6x/dl-inlines.h @@ -0,0 +1,120 @@ +/* Copyright (C) 2010 Texas Instruments Incorporated + * Contributed by Mark Salter <msalter@redhat.com> + * + * Borrowed heavily from frv arch: + * Copyright (C) 2003, 2004 Red Hat, Inc. + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ + +/* Figure out whether the given address is in one of the mapped + segments. */ +static __always_inline int +__dl_addr_in_loadaddr (void *p, struct elf32_dsbt_loadaddr loadaddr) +{ + struct elf32_dsbt_loadmap *map = loadaddr.map; + int c; + + for (c = 0; c < map->nsegs; c++) + if ((void*)map->segs[c].addr <= p + && (char*)p < (char*)map->segs[c].addr + map->segs[c].p_memsz) + return 1; + + return 0; +} + +/* Figure out how many LOAD segments there are in the given headers, + and allocate a block for the load map big enough for them. + got_value will be properly initialized later on, with INIT_GOT. */ +static __always_inline int +__dl_init_loadaddr (struct elf32_dsbt_loadaddr *loadaddr, Elf32_Phdr *ppnt, + int pcnt) +{ + int count = 0, i; + size_t size; + + for (i = 0; i < pcnt; i++) + if (ppnt[i].p_type == PT_LOAD) + count++; + + size = sizeof (struct elf32_dsbt_loadmap) + + sizeof (struct elf32_dsbt_loadseg) * count; + loadaddr->map = _dl_malloc (size); + if (! loadaddr->map) + _dl_exit (-1); + + loadaddr->map->version = 0; + loadaddr->map->nsegs = 0; + + return count; +} + +/* Incrementally initialize a load map. */ +static __always_inline void +__dl_init_loadaddr_hdr (struct elf32_dsbt_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr, int maxsegs) +{ + struct elf32_dsbt_loadseg *segdata; + + if (loadaddr.map->nsegs == maxsegs) + _dl_exit (-1); + + segdata = &loadaddr.map->segs[loadaddr.map->nsegs++]; + segdata->addr = (Elf32_Addr) addr; + segdata->p_vaddr = phdr->p_vaddr; + segdata->p_memsz = phdr->p_memsz; + +#if defined (__SUPPORT_LD_DEBUG__) + { + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, segdata->p_memsz); + } +#endif +} + +/* Replace an existing entry in the load map. 
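The helpers above only build and fill in the load map; translating a link-time address through it happens elsewhere, in the __reloc_pointer helper behind DL_RELOC_ADDR. A minimal stand-alone sketch of that lookup over such a map, with structures that mirror elf32_dsbt_loadmap/elf32_dsbt_loadseg but are otherwise hypothetical:

/* Stand-alone sketch of address translation through a load map: find the
 * PT_LOAD segment whose link-time range covers the address and add that
 * segment's load bias.  Not the loader's own __reloc_pointer. */
#include <stdint.h>
#include <stdio.h>

struct seg { uint32_t addr, p_vaddr, p_memsz; };
struct loadmap { uint16_t version, nsegs; struct seg segs[4]; };

static void *reloc_pointer(uint32_t vaddr, const struct loadmap *map)
{
	int i;

	for (i = 0; i < map->nsegs; i++) {
		const struct seg *s = &map->segs[i];
		if (vaddr >= s->p_vaddr && vaddr - s->p_vaddr < s->p_memsz)
			return (void *)(uintptr_t)(s->addr + (vaddr - s->p_vaddr));
	}
	return (void *)(uintptr_t)vaddr;	/* not covered: leave unchanged */
}

int main(void)
{
	struct loadmap m = { 0, 2, {
		{ 0x80000000, 0x00000000, 0x10000 },	/* text segment */
		{ 0x80020000, 0x00010000, 0x04000 },	/* data segment */
	} };

	/* 0x10100 lies in the second segment, so it maps to 0x80020100. */
	printf("%p\n", reloc_pointer(0x00010100, &m));
	return 0;
}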
*/ +static __always_inline void +__dl_update_loadaddr_hdr (struct elf32_dsbt_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr) +{ + struct elf32_dsbt_loadseg *segdata; + void *oldaddr; + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr + && loadaddr.map->segs[i].p_memsz == phdr->p_memsz) + break; + if (i == loadaddr.map->nsegs) + _dl_exit (-1); + + segdata = loadaddr.map->segs + i; + oldaddr = (void *)segdata->addr; + _dl_munmap (oldaddr, segdata->p_memsz); + segdata->addr = (Elf32_Addr) addr; + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz); +#endif +} + +static __always_inline void +__dl_loadaddr_unmap (struct elf32_dsbt_loadaddr loadaddr) +{ + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + _dl_munmap ((void*)loadaddr.map->segs[i].addr, + loadaddr.map->segs[i].p_memsz); + + /* _dl_unmap is only called for dlopen()ed libraries, for which + calling free() is safe, or before we've completed the initial + relocation, in which case calling free() is probably pointless, + but still safe. */ + _dl_free (loadaddr.map); +} diff --git a/ldso/ldso/c6x/dl-startup.h b/ldso/ldso/c6x/dl-startup.h new file mode 100644 index 000000000..c83e33cb3 --- /dev/null +++ b/ldso/ldso/c6x/dl-startup.h @@ -0,0 +1,189 @@ +/* Copyright (C) 2010 Texas Instruments Incorporated + * Contributed by Mark Salter <msalter@redhat.com> + * + * Borrowed heavily from frv arch: + * Copyright (C) 2003 Red Hat, Inc. + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ +#undef DL_START +#define DL_START(X) \ +static void * __attribute_used__ \ +_dl_start (unsigned placeholder, \ + struct elf32_dsbt_loadmap *dl_boot_progmap, \ + struct elf32_dsbt_loadmap *dl_boot_ldsomap, \ + Elf32_Dyn *dl_boot_ldso_dyn_pointer, \ + X) + +/* + * On entry, the kernel has set up the stack thusly: + * + * 0(sp) pad0 + * 4(sp) pad1 + * 8(sp) argc + * 12(sp) argv[0] + * ... + * (4*(argc+3))(sp) NULL + * (4*(argc+4))(sp) envp[0] + * ... + * NULL + * + * Register values are unspecified, except: + * + * B4 --> executable loadmap address + * A6 --> interpreter loadmap address + * B6 --> dynamic section address + * + * NB: DSBT index is always 0 for the executable + * and 1 for the interpreter + */ + +__asm__(" .text\n" + ".globl _start\n" + ".hidden _start\n" + "_start:\n" + /* Find interpreter DSBT base in dynamic section */ + " MV .S2 B6,B2\n" + " || ADD .D1X B6,4,A2\n" + " LDW .D2T2 *B2++[2],B0\n" + " || LDW .D1T1 *A2++[2],A0\n" + " MVKL .S2 " __stringify(DT_C6000_DSBT_BASE) ",B7\n" + " MVKH .S2 " __stringify(DT_C6000_DSBT_BASE) ",B7\n" + " NOP\n" + " NOP\n" + /* + * B0 now holds dynamic tag and A0 holds tag value. + * Loop through looking for DSBT base tag + */ + "0:\n" + " [B0] CMPEQ .L2 B0,B7,B1\n" + " || [!B0] MVK .S2 1,B1\n" + " [!B1] BNOP .S1 0b,5\n" + " ||[!B1] LDW .D2T2 *B2++[2],B0\n" + " ||[!B1] LDW .D1T1 *A2++[2],A0\n" + /* + * DSBT base in A0 needs to be relocated. + * Search through our loadmap to find where it got loaded. 
+ * + * struct elf32_dsbt_loadmap { + * Elf32_Half version; + * Elf32_Half nsegs; + * struct { + * Elf32_Addr addr; + * Elf32_Addr p_vaddr; + * Elf32_Word p_memsz; + * } segments[]; + * } + * + */ + " MV .S1 A6,A1\n" + " [!A1] MV .S1X B4,A1\n" + " ADD .D1 A1,2,A3\n" + " LDHU .D1T2 *A3++[1],B0\n" /* nsegs */ + " LDW .D1T1 *A3++[1],A10\n" /* addr */ + " LDW .D1T1 *A3++[1],A11\n" /* p_vaddr */ + " LDW .D1T1 *A3++[1],A12\n" /* p_memsz */ + " NOP\n" + " NOP\n" + /* + * Here we have: + * B0 -> number of segments to search. + * A3 -> pointer to next segment to check + * A10 -> segment load address + * A11 -> ELF segment virt address + * A12 -> ELF segment size + */ + "0:\n" + " [!B0] B .S2 0f\n" + " SUB .D2 B0,1,B0\n" + " CMPLTU .L1 A0,A11,A13\n" + " || SUB .S1 A12,1,A12\n" + " ADD .D1 A11,A12,A12\n" + " CMPGTU .L1 A0,A12,A14\n" + " OR .L1 A13,A14,A2\n" + " [A2] B .S2 0b\n" + " || [!A2] SUB .L1 A0,A11,A0\n" + " [B0] LDW .D1T1 *A3++[1],A10\n" /* addr */ + " || [!A2] ADD .L1 A0,A10,A0\n" + " [B0] LDW .D1T1 *A3++[1],A11\n" /* p_vaddr */ + " [B0] LDW .D1T1 *A3++[1],A12\n" /* p_memsz */ + " MV .S2X A0,B14\n" + " NOP\n" + "0:\n" + " B .S2 _dl_start\n" + " STW .D2T2 B14, *+B14[1]\n" + " ADD .D1X B15,8,A8\n" + " ADDKPC .S2 ret_from_dl,B3,2\n" + "ret_from_dl:\n" + " B .S2X A4\n" + " || LDW .D2T2 *+B14[0],B14\n" + " ADDKPC .S2 __dl_fini,B0,0\n" + " MV .S1X B0,A4\n" + " NOP\n" + " NOP\n" + " NOP\n" + "__dl_fini:\n" + " LDW .D2T2 *+B14[1],B14\n" + " NOP 4\n" + " LDW .D2T1 *+B14($GOT(_dl_fini)), A0\n" + " NOP 4\n" + " BNOP .S2X A0, 5\n"); + +__asm__(" .text\n" + "__c6x_cache_sync:\n" + " MVK .S2 330,B0\n" + " SWE\n" + " NOP\n" + " BNOP .S2 B3,5\n" + " NOP\n" + " NOP\n" + " NOP\n" + " NOP\n" + "\n" +); + +/* + * Get a pointer to the argv array. On many platforms this can be just + * the address of the first argument, on other platforms we need to + * do something a little more subtle here. + */ +#define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*) ARGS) + 1) + +struct elf32_dsbt_loadmap; + +/* + * Here is a macro to perform a relocation. This is only used when + * bootstrapping the dynamic loader. RELP is the relocation that we + * are performing, REL is the pointer to the address we are relocating. + * SYMBOL is the symbol involved in the relocation, and LOAD is the + * load address. 
+ */ +#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \ + switch(ELF_R_TYPE((RELP)->r_info)){ \ + case R_C6000_ABS_L16: \ + { \ + unsigned int opcode = *(REL); \ + unsigned int v = (SYMBOL) + (RELP)->r_addend; \ + opcode &= ~0x7fff80; \ + opcode |= ((v & 0xffff) << 7); \ + *(REL) = opcode; \ + } \ + break; \ + case R_C6000_ABS_H16: \ + { \ + unsigned int opcode = *(REL); \ + unsigned int v = (SYMBOL) + (RELP)->r_addend; \ + opcode &= ~0x7fff80; \ + opcode |= ((v >> 9) & 0x7fff80); \ + *(REL) = opcode; \ + } \ + break; \ + case R_C6000_ABS32: \ + *(REL) = (SYMBOL) + (RELP)->r_addend; \ + break; \ + default: \ + _dl_exit(1); \ + } + +extern void __c6x_cache_sync(unsigned long start, unsigned long end) + attribute_hidden; diff --git a/ldso/ldso/c6x/dl-syscalls.h b/ldso/ldso/c6x/dl-syscalls.h new file mode 100644 index 000000000..f40c4fd31 --- /dev/null +++ b/ldso/ldso/c6x/dl-syscalls.h @@ -0,0 +1 @@ +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/c6x/dl-sysdep.h b/ldso/ldso/c6x/dl-sysdep.h new file mode 100644 index 000000000..c2e91d2f0 --- /dev/null +++ b/ldso/ldso/c6x/dl-sysdep.h @@ -0,0 +1,230 @@ +/* Copyright (C) 2010 Texas Instruments Incorporated + * Contributed by Mark Salter <msalter@redhat.com> + * + * Borrowed heavily from frv arch: + * Copyright (C) 2003, 2004 Red Hat, Inc. + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ + +#include <bits/elf-dsbt.h> + +/* + * Define this if the system uses RELOCA. + */ +#define ELF_USES_RELOCA 1 + +/* JMPREL relocs are inside the DT_RELA table. */ +/* Actually looks like a linker bug sets DT_JMPREL anyway */ +#define ELF_MACHINE_PLTREL_OVERLAP 1 + +#undef DL_NO_COPY_RELOCS + +#define HAVE_DL_INLINES_H + + +/* + * Various assembly language/system dependent hacks that are required + * so that we can minimize the amount of platform specific code. + */ + +/* Initialization sequence for the GOT. */ +#define INIT_GOT(GOT_BASE,MODULE) \ +{ \ + GOT_BASE[0] = (unsigned long) _dl_linux_resolve; \ + GOT_BASE[1] = (unsigned long) MODULE; \ +} + +/* Here we define the magic numbers that this dynamic loader should accept */ +#define MAGIC1 EM_TI_C6000 +#undef MAGIC2 + +/* Used for error messages */ +#define ELF_TARGET "C6000" + +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + +struct elf_resolve; + +extern int _dl_linux_resolve(void) attribute_hidden; + +struct funcdesc_ht; +struct elf32_dsbt_loadaddr; + +/* Current toolchains access constant strings via unrelocated GOT + entries. Fortunately, we have enough in place to just call the + relocation function early on. 
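The R_C6000_ABS_L16 and R_C6000_ABS_H16 cases of PERFORM_BOOTSTRAP_RELOC above patch the 16-bit constant field (instruction bits 7..22) of an MVKL/MVKH pair with the low and high halves of the relocated value. The stand-alone snippet below is only a round-trip check of that packing, reusing the macro's masks and shifts; the zero opcode is a placeholder, not a real C6000 instruction:

/* Round-trip check of the MVKL/MVKH constant-field packing used by the
 * R_C6000_ABS_L16 / R_C6000_ABS_H16 cases above (illustrative only). */
#include <assert.h>
#include <stdint.h>

static uint32_t patch_l16(uint32_t opcode, uint32_t v)
{
	opcode &= ~0x7fff80;			/* clear the constant field, bits 7..22 */
	return opcode | ((v & 0xffff) << 7);	/* low 16 bits of the value */
}

static uint32_t patch_h16(uint32_t opcode, uint32_t v)
{
	opcode &= ~0x7fff80;
	return opcode | ((v >> 9) & 0x7fff80);	/* high 16 bits of the value */
}

int main(void)
{
	uint32_t v = 0x89abcdef;
	uint32_t lo = patch_l16(0, v);
	uint32_t hi = patch_h16(0, v);

	/* Reassembling the two fields must give back the original value. */
	assert(((((hi >> 7) & 0xffffu) << 16) | ((lo >> 7) & 0xffffu)) == v);
	return 0;
}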
*/ +#undef SEND_EARLY_STDERR +#define SEND_EARLY_STDERR(S) \ + do { char *__p = __reloc_pointer((S), dl_boot_ldsomap?:dl_boot_progmap);\ + SEND_STDERR (__p); } while (0) + +#define DL_LOADADDR_TYPE struct elf32_dsbt_loadaddr + +#define DL_RELOC_ADDR(LOADADDR, ADDR) \ + ((ElfW(Addr))__reloc_pointer ((void*)(ADDR), (LOADADDR).map)) + +#define DL_INIT_LOADADDR_BOOT(LOADADDR, BASEADDR) \ + do { \ + struct elf32_dsbt_loadmap *map; \ + map = dl_boot_ldsomap ?: dl_boot_progmap; \ + if (map->version != 0) { \ + SEND_EARLY_STDERR ("Invalid loadmap version number\n"); \ + _dl_exit(-1); \ + } \ + if (map->nsegs < 2) { \ + SEND_EARLY_STDERR ("Invalid segment count in loadmap\n"); \ + _dl_exit(-1); \ + } \ + (LOADADDR).map = map; \ + } while(0) + +#define DL_INIT_LOADADDR_PROG(LOADADDR, BASEADDR) \ + do { \ + if (dl_boot_progmap->version != 0) { \ + SEND_EARLY_STDERR ("Invalid loadmap version number\n"); \ + _dl_exit(-1); \ + } \ + if (dl_boot_progmap->nsegs < 2) { \ + SEND_EARLY_STDERR ("Invalid segment count in loadmap\n"); \ + _dl_exit(-1); \ + } \ + (LOADADDR).map = dl_boot_progmap; \ + } while(0) + +#define DL_INIT_LOADADDR_EXTRA_DECLS \ + int dl_init_loadaddr_load_count; + +#define DL_INIT_LOADADDR(LOADADDR, BASEADDR, PHDR, PHDRCNT) \ + (dl_init_loadaddr_load_count = \ + __dl_init_loadaddr (&(LOADADDR), (PHDR), (PHDRCNT))) + +#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ + dl_init_loadaddr_load_count)) + +#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR))) + +#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ + (__dl_loadaddr_unmap ((LOADADDR))) + +#define DL_LIB_UNMAP(LIB, LEN) \ + (__dl_loadaddr_unmap ((LIB)->loadaddr)) + +#define DL_LOADADDR_BASE(LOADADDR) \ + ((LOADADDR).map) + +#define DL_ADDR_IN_LOADADDR(ADDR, TPNT, TFROM) \ + (! (TFROM) && __dl_addr_in_loadaddr ((void*)(ADDR), (TPNT)->loadaddr)) + + +/* We only support loading DSBT relocatable shared libraries. + It probably wouldn't be too hard to support loading statically + linked executables that require relocation.*/ +#define DL_CHECK_LIB_TYPE(epnt, piclib, _dl_progname, libname) \ +do \ +{ \ + (piclib) = 2; \ +} \ +while (0) + +/* We want want to apply all relocations in the interpreter during + bootstrap. Because of this, we have to skip the interpreter + relocations in _dl_parse_relocation_information(), see + elfinterp.c. */ +#define DL_SKIP_BOOTSTRAP_RELOC(SYMTAB, INDEX, STRTAB) 0 + +#ifdef __NR_pread64 +#define _DL_PREAD(FD, BUF, SIZE, OFFSET) \ + (_dl_pread((FD), (BUF), (SIZE), (OFFSET))) +#endif + +#define DL_GET_READY_TO_RUN_EXTRA_PARMS \ + , struct elf32_dsbt_loadmap *dl_boot_progmap \ + , struct elf32_dsbt_loadmap *dl_boot_ldsomap +#define DL_GET_READY_TO_RUN_EXTRA_ARGS \ + , dl_boot_progmap \ + , dl_boot_ldsomap + + +/* + * C6X doesn't really need the GOT here. + * The GOT is placed just past the DSBT table, so we could find it by + * using the DSBT register + table size found in the dynamic section. + * + * do { \ + * unsigned long *ldso_dsbt; \ + * ElfW(Dyn) *d = dl_boot_ldso_dyn_pointer; \ + * while (d->d_tag != DT_NULL) { \ + * if (d->d_tag == DT_C6000_DSBT_SIZE) { \ + * __asm__ (" MV .S2 B14,%0\n" \ + * : "=b" (ldso_dsbt)); \ + * (GOT) = ldso_dsbt + d->d_un.d_val; \ + * break; \ + * } \ + * d++; \ + * } \ + * } while(0) + * + * Instead, just point it to the DSBT table to avoid unused variable warning. 
+ */ +#define DL_BOOT_COMPUTE_GOT(GOT) \ + __asm__ (" MV .S2 B14,%0\n" : "=b" (GOT)) + +#define DL_BOOT_COMPUTE_DYN(dpnt, got, load_addr) \ + ((dpnt) = dl_boot_ldso_dyn_pointer) + +/* Define this to declare the library offset. */ +#define DL_DEF_LIB_OFFSET + +/* Define this to get the library offset. */ +#define DL_GET_LIB_OFFSET() 0 + +/* Define this to set the library offset. */ +#define DL_SET_LIB_OFFSET(offset) + +/* Define this to get the real object's runtime address. */ +#define DL_GET_RUN_ADDR(loadaddr, mapaddr) (loadaddr) + +#ifdef __USE_GNU +# include <link.h> +#else +# define __USE_GNU +# include <link.h> +# undef __USE_GNU +#endif + +/* we need this for __LDSO_STANDALONE_SUPPORT__ */ +#define elf_machine_load_address() \ + (dl_boot_ldsomap ?: dl_boot_progmap)->segs[0].addr + +static __always_inline void +elf_machine_relative (DL_LOADADDR_TYPE load_off, const Elf32_Addr rel_addr, + Elf32_Word relative_count) +{ +} + +/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so + PLT entries should not be allowed to define the value. + ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one + of the main executable's symbols, as for a COPY reloc. */ +#define elf_machine_type_class(type) \ + ((((type) == R_C6000_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_C6000_COPY) * ELF_RTYPE_CLASS_COPY)) + +#define ARCH_NUM 3 +#define DT_DSBT_BASE_IDX (DT_NUM + OS_NUM) +#define DT_DSBT_SIZE_IDX (DT_NUM + OS_NUM + 1) +#define DT_DSBT_INDEX_IDX (DT_NUM + OS_NUM + 2) + +#define ARCH_DYNAMIC_INFO(dpnt, dynamic, debug_addr) \ +do { \ +if (dpnt->d_tag == DT_C6000_DSBT_BASE) \ + dynamic[DT_DSBT_BASE_IDX] = dpnt->d_un.d_val; \ +else if (dpnt->d_tag == DT_C6000_DSBT_SIZE) \ + dynamic[DT_DSBT_SIZE_IDX] = dpnt->d_un.d_val; \ +else if (dpnt->d_tag == DT_C6000_DSBT_INDEX) \ + dynamic[DT_DSBT_INDEX_IDX] = dpnt->d_un.d_val; \ +} while (0) diff --git a/ldso/ldso/c6x/elfinterp.c b/ldso/ldso/c6x/elfinterp.c new file mode 100644 index 000000000..f0e05b9d0 --- /dev/null +++ b/ldso/ldso/c6x/elfinterp.c @@ -0,0 +1,304 @@ +/* TI C64X DSBT ELF shared library loader suppport + * Copyright (C) 2010 Texas Instruments Incorporated + * Contributed by Mark Salter <msalter@redhat.com> + * + * Borrowed heavily from frv arch: + * Copyright (C) 2003, 2004 Red Hat, Inc. + * Contributed by Alexandre Oliva <aoliva@redhat.com> + * Lots of code copied from ../i386/elfinterp.c, so: + * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald, + * David Engel, Hongjiu Lu and Mitch D'Souza + * Copyright (C) 2001-2002, Erik Andersen + * All rights reserved. + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ + +#include <features.h> + +/* Program to load an ELF binary on a linux system, and run it. + References to symbols in sharable libraries can be resolved by either + an ELF sharable library or a linux style of shared library. */ + +/* Disclaimer: I have never seen any AT&T source code for SVr4, nor have + I ever taken any courses on internals. This program was developed using + information available through the book "UNIX SYSTEM V RELEASE 4, + Programmers guide: Ansi C and Programming Support Tools", which did + a more than adequate job of explaining everything required to get this + working. 
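For readers new to the DSBT model this port assumes: every participating module is assigned an index into a data segment base table, each module's data segment begins with its own copy of that table (B14 points at it while the module's code runs), and entering another module means reloading B14 from the slot carrying that module's index, the slot number that R_C6000_DSBT_INDEX patches and that DT_C6000_DSBT_INDEX records in the extra dynamic_info slots above. The toy model below is purely illustrative; none of its names exist in the loader:

/* Toy, host-side model of the DSBT scheme (illustrative only).
 * Slot k of every copy of the table holds module k's data segment base;
 * index 0 is the executable and index 1 the interpreter, as noted in
 * dl-startup.h above. */
#include <stdio.h>

#define NMODULES 3

static unsigned long dsbt[NMODULES][NMODULES];	/* one table copy per module */

/* What a cross-module entry sequence amounts to: starting from the
 * caller's base, fetch the callee's base through the callee's index. */
static unsigned long switch_base(unsigned long caller_b14, int callee_index)
{
	const unsigned long *caller_table = (const unsigned long *)caller_b14;
	return caller_table[callee_index];
}

int main(void)
{
	unsigned long bases[NMODULES];
	int i, j;

	for (i = 0; i < NMODULES; i++)
		bases[i] = (unsigned long)dsbt[i];	/* table sits at each module's base */
	for (i = 0; i < NMODULES; i++)
		for (j = 0; j < NMODULES; j++)
			dsbt[i][j] = bases[j];

	/* Module 0 (the executable) transfers control into module 2. */
	printf("new B14 = %#lx (expected %#lx)\n", switch_base(bases[0], 2), bases[2]);
	return 0;
}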
*/ + +extern void __c6x_cache_sync(unsigned long start, unsigned long end) + attribute_hidden; + +static void +_dl_c6x_flush_relocs(struct elf32_dsbt_loadmap *map) +{ + unsigned long s, e; + s = map->segs[0].addr; + e = s + map->segs[0].p_memsz; + __c6x_cache_sync(s, e); + s = map->segs[1].addr; + e = s + map->segs[1].p_memsz; + __c6x_cache_sync(s, e); +} + + +attribute_hidden +char * +_dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) +{ + ELF_RELOC *this_reloc; + char *strtab; + ElfW(Sym) *symtab; + int symtab_index; + char *rel_addr; + char *new_addr; + char **got_addr; + char *symname; + + rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; + + this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); + symtab_index = ELF_R_SYM(this_reloc->r_info); + + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; + symname = strtab + symtab[symtab_index].st_name; + + /* Address of GOT entry fix up */ + got_addr = (char **) DL_RELOC_ADDR(tpnt->loadaddr, this_reloc->r_offset); + + /* Get the address to be used to fill in the GOT entry. */ + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); + if (unlikely(!new_addr)) { + _dl_dprintf(2, "%s: can't resolve symbol '%s' in lib '%s'.\n", _dl_progname, symname, tpnt->libname); + _dl_exit(1); + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_bindings) { + _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname); + if (_dl_debug_detail) + _dl_dprintf(_dl_debug_file, + "\n\tpatched %x ==> %x @ %x\n", + *got_addr, new_addr, got_addr); + } + if (!_dl_debug_nofixups) { + *got_addr = new_addr; + } +#else + *got_addr = new_addr; +#endif + + return new_addr; +} + +static int +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, + unsigned long rel_addr, unsigned long rel_size, + int (*reloc_fnc) (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) +{ + unsigned int i; + char *strtab; + ElfW(Sym) *symtab; + ELF_RELOC *rpnt; + int symtab_index; + + /* Now parse the relocation information */ + rpnt = (ELF_RELOC *)rel_addr; + rel_size = rel_size / sizeof(ELF_RELOC); + + symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; + + for (i = 0; i < rel_size; i++, rpnt++) { + int res; + + symtab_index = ELF_R_SYM(rpnt->r_info); + debug_sym(symtab,strtab,symtab_index); + debug_reloc(symtab,strtab,rpnt); + + res = reloc_fnc (tpnt, scope, rpnt, symtab, strtab); + + if (res==0) continue; + + _dl_dprintf(2, "\n%s: ",_dl_progname); + + if (symtab_index) + _dl_dprintf(2, "symbol '%s': ", strtab + symtab[symtab_index].st_name); + + if (res <0) { + int reloc_type = ELF_R_TYPE(rpnt->r_info); +#if defined (__SUPPORT_LD_DEBUG__) + _dl_dprintf(2, "can't handle reloc type %s\n ", _dl_reltypes(reloc_type)); +#else + _dl_dprintf(2, "can't handle reloc type %x\n", reloc_type); +#endif + _dl_exit(-res); + } else if (res >0) { + _dl_dprintf(2, "can't resolve symbol\n"); + return res; + } + } + _dl_c6x_flush_relocs(tpnt->loadaddr.map); + return 0; +} + +static int +_dl_do_reloc (struct elf_resolve *tpnt,struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) +{ + int reloc_type; + int symtab_index; + char *symname; + unsigned long *reloc_addr; + unsigned long symbol_addr, sym_val; + long reloc_addend; + unsigned long old_val, new_val = 0; + struct symbol_ref sym_ref; + struct elf_resolve *symbol_tpnt; + + reloc_addr = 
(unsigned long *)(intptr_t) + DL_RELOC_ADDR (tpnt->loadaddr, rpnt->r_offset); + + reloc_type = ELF_R_TYPE(rpnt->r_info); + reloc_addend = rpnt->r_addend; + symtab_index = ELF_R_SYM(rpnt->r_info); + symbol_addr = 0; + symname = strtab + symtab[symtab_index].st_name; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; + + if (ELF_ST_BIND (symtab[symtab_index].st_info) == STB_LOCAL) { + symbol_addr = (unsigned long) + DL_RELOC_ADDR (tpnt->loadaddr, symtab[symtab_index].st_value); + symbol_tpnt = tpnt; + } else { + symbol_addr = (unsigned long) _dl_find_hash(symname, + scope, NULL, elf_machine_type_class(reloc_type), + &sym_ref); + /* + * We want to allow undefined references to weak symbols - this might + * have been intentional. We should not be linking local symbols + * here, so all bases should be covered. + */ + + if (!symbol_addr && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { + _dl_dprintf (2, "%s: can't resolve symbol '%s'\n", + _dl_progname, strtab + symtab[symtab_index].st_name); + _dl_exit (1); + } + symbol_tpnt = sym_ref.tpnt; + } + old_val = *reloc_addr; + sym_val = symbol_addr + reloc_addend; + + switch (reloc_type) { + case R_C6000_NONE: + break; + case R_C6000_ABS32: + case R_C6000_JUMP_SLOT: + new_val = sym_val; + *reloc_addr = sym_val; + break; + case R_C6000_DSBT_INDEX: + new_val = (old_val & ~0x007fff00) | ((symbol_tpnt->dsbt_index & 0x7fff) << 8); + *reloc_addr = new_val; + break; + case R_C6000_ABS_L16: + new_val = (old_val & ~0x007fff80) | ((sym_val & 0xffff) << 7); + *reloc_addr = new_val; + break; + case R_C6000_ABS_H16: + new_val = (old_val & ~0x007fff80) | ((sym_val >> 9) & 0x007fff80); + *reloc_addr = new_val; + break; + case R_C6000_PCR_S21: + new_val = sym_val - (((unsigned long)reloc_addr) & ~31); + *reloc_addr = (old_val & ~0x0fffff80) | (((new_val >> 2) & 0x1fffff) << 7); + break; + case R_C6000_COPY: + if (symbol_addr) { +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_move) + _dl_dprintf(_dl_debug_file, + "\n%s move %d bytes from %x to %x", + symname, symtab[symtab_index].st_size, + symbol_addr, reloc_addr); +#endif + + _dl_memcpy((char *)reloc_addr, + (char *)symbol_addr, + symtab[symtab_index].st_size); + } + return 0; + default: + return -1; /*call _dl_exit(1) */ + } +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_reloc && _dl_debug_detail && reloc_type != R_C6000_NONE) { + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, new_val, reloc_addr); + } +#endif + return 0; +} + +static int +_dl_do_lazy_reloc (struct elf_resolve *tpnt, + struct r_scope_elem *scope attribute_unused, + ELF_RELOC *rpnt, ElfW(Sym) *symtab attribute_unused, + char *strtab attribute_unused) +{ + int reloc_type; + unsigned long *reloc_addr; + unsigned long old_val; + + reloc_addr = (unsigned long *) DL_RELOC_ADDR(tpnt->loadaddr, rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + + old_val = *reloc_addr; + + switch (reloc_type) { + case R_C6000_NONE: + break; + case R_C6000_JUMP_SLOT: + *reloc_addr = DL_RELOC_ADDR(tpnt->loadaddr, old_val); + break; + default: + return -1; + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_reloc && _dl_debug_detail) + _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x\n", + old_val, *reloc_addr, reloc_addr); +#endif + + return 0; +} + +void +_dl_parse_lazy_relocation_information +(struct dyn_elf *rpnt, unsigned long rel_addr, unsigned long rel_size) +{ + _dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc); +} + +int +_dl_parse_relocation_information +(struct dyn_elf *rpnt, 
struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) +{ + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); +} + +/* We don't have copy relocs. */ +int +_dl_parse_copy_information +(struct dyn_elf *rpnt, + unsigned long rel_addr, + unsigned long rel_size) +{ + return 0; +} + diff --git a/ldso/ldso/c6x/resolve.S b/ldso/ldso/c6x/resolve.S new file mode 100644 index 000000000..ce3cbe793 --- /dev/null +++ b/ldso/ldso/c6x/resolve.S @@ -0,0 +1,68 @@ +;; +;; Copyright (C) 2010 Texas Instruments Incorporated +;; Mark Salter <msalter@redhat.com> +;; +;; Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. +;; + +;; The function below is tail-called by resolver stubs when a +;; lazily-bound function is called. It must preserve all +;; registers that could be used to pass arguments to the actual +;; function. + +;; _dl_linux_resolver() figures out where the jump symbol is +;; _really_ supposed to have jumped to and returns that to us. +;; Once we have that, we prepare to tail-call the actual +;; function, clean up after ourselves, restoring the original +;; arguments, then jump to the fixed up address. */ + +; resolver stub - called from PLT to resolve target address and update GOT +; +; B0 : reloc offset (bytes from DT_RELPLT) +; B1 : module pointer, loaded from GOT[1] +; DP : caller's DP +; A4,B4, etc: callee's arguments +; B3 : return address + + .text + .align 5 + .global _dl_linux_resolve +_dl_linux_resolve: + stw .d2t2 B14, *B15--[2] + stdw .d2t1 A15:A14, *B15-- + stdw .d2t2 B13:B12, *B15-- + stdw .d2t1 A13:A12, *B15-- + stdw .d2t2 B11:B10, *B15-- + stdw .d2t1 A11:A10, *B15-- + stdw .d2t2 B9:B8, *B15-- + stdw .d2t1 A9:A8, *B15-- + stdw .d2t2 B7:B6, *B15-- + stdw .d2t1 A7:A6, *B15-- + stdw .d2t2 B5:B4, *B15-- + stdw .d2t1 A5:A4, *B15-- + stdw .d2t2 B3:B2, *B15-- + stdw .d2t1 A3:A2, *B15-- + + ; call lookup routine + MV .S1X B1, A4 ; arg 1: module id +|| MV .S2 B0,B4 ; arg 2: reloc offset + CALLP .S2 _dl_linux_resolver, B3 ; returns &f in A4 + MV .S2X A4,B0 ; &f + + lddw .d2t1 *++B15, A3:A2 + lddw .d2t2 *++B15, B3:B2 + lddw .d2t1 *++B15, A5:A4 + lddw .d2t2 *++B15, B5:B4 + lddw .d2t1 *++B15, A7:A6 + lddw .d2t2 *++B15, B7:B6 + lddw .d2t1 *++B15, A9:A8 + lddw .d2t2 *++B15, B9:B8 + lddw .d2t1 *++B15, A11:A10 + lddw .d2t2 *++B15, B11:B10 + lddw .d2t1 *++B15, A13:A12 + lddw .d2t2 *++B15, B13:B12 + lddw .d2t1 *++B15, A15:A14 + ldw .d2t2 *++B15[2], B14 + + B .S2 B0 ; tail-call f + NOP 5 diff --git a/ldso/ldso/cris/dl-debug.h b/ldso/ldso/cris/dl-debug.h index f6c03d21f..dcd23edb5 100644 --- a/ldso/ldso/cris/dl-debug.h +++ b/ldso/ldso/cris/dl-debug.h @@ -33,7 +33,7 @@ * SUCH DAMAGE. 
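The bit-twiddling in the new c6x _dl_do_reloc above is easier to check with the field boundaries spelled out. Both R_C6000_ABS_L16 and R_C6000_ABS_H16 splice a 16-bit value into bits 7..22 of the instruction word (presumably the MVK/MVKH constant field); the low relocation inserts bits 0..15 of the symbol value and the high relocation inserts bits 16..31. A standalone restatement of the two cases, useful only for verifying the shifts:

static unsigned long
patch_abs_l16 (unsigned long insn, unsigned long val)
{
	return (insn & ~0x007fff80UL) | ((val & 0xffffUL) << 7);
}

static unsigned long
patch_abs_h16 (unsigned long insn, unsigned long val)
{
	/* val bit 16 ends up in insn bit 7, val bit 31 in insn bit 22.  */
	return (insn & ~0x007fff80UL) | ((val >> 9) & 0x007fff80UL);
}

For val == 0x12345678 the low half 0x5678 lands in the first instruction's field and the high half 0x1234 in the second, so the patched pair rebuilds the full 32-bit address at run time.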
*/ -static const char *_dl_reltypes_tab[] = { +static const char * const _dl_reltypes_tab[] = { [0] "R_CRIS_NONE", "R_CRIS_8", "R_CRIS_16", "R_CRIS_32", [4] "R_CRIS_8_PCREL", "R_CRIS_16_PCREL", "R_CRIS_32_PCREL", "R_CRIS_GNU_VTINHERIT", [8] "R_CRIS_GNU_VTENTRY", "R_CRIS_COPY", "R_CRIS_GLOB_DAT", "R_CRIS_JUMP_SLOT", diff --git a/ldso/ldso/cris/dl-startup.h b/ldso/ldso/cris/dl-startup.h index 832c3528b..66580004e 100644 --- a/ldso/ldso/cris/dl-startup.h +++ b/ldso/ldso/cris/dl-startup.h @@ -10,6 +10,7 @@ __asm__("" \ " .text\n" \ " .globl _start\n" \ " .type _start,@function\n" \ +" .hidden _start\n" \ "_start:\n" \ " move.d $sp,$r10\n" \ " lapc _dl_start,$r9\n" \ @@ -28,6 +29,7 @@ __asm__("" \ " .text\n" \ " .globl _start\n" \ " .type _start,@function\n" \ +" .hidden _start\n" \ "_start:\n" \ " move.d $sp,$r10\n" \ " move.d $pc,$r9\n" \ @@ -43,7 +45,7 @@ __asm__("" \ #endif /* __arch_v32 */ /* Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long *) ARGS)+1) @@ -53,11 +55,11 @@ __asm__("" \ /* Handle relocation of the symbols in the dynamic loader. */ -static inline +static __always_inline void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, unsigned long symbol_addr, unsigned long load_addr, Elf32_Sym *symtab) { - switch (ELF32_R_TYPE(rpnt->r_info)) { + switch (ELF_R_TYPE(rpnt->r_info)) { case R_CRIS_GLOB_DAT: case R_CRIS_JUMP_SLOT: case R_CRIS_32: diff --git a/ldso/ldso/cris/dl-syscalls.h b/ldso/ldso/cris/dl-syscalls.h index 996bb87c6..f40c4fd31 100644 --- a/ldso/ldso/cris/dl-syscalls.h +++ b/ldso/ldso/cris/dl-syscalls.h @@ -1,6 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/cris/dl-sysdep.h b/ldso/ldso/cris/dl-sysdep.h index c68541d1b..e454c10a8 100644 --- a/ldso/ldso/cris/dl-sysdep.h +++ b/ldso/ldso/cris/dl-sysdep.h @@ -15,14 +15,12 @@ #undef MAGIC2 #define ELF_TARGET "CRIS" +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + struct elf_resolve; extern unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry); -/* 8192 bytes alignment */ -#define PAGE_ALIGN 0xffffe000 -#define ADDR_ALIGN 0x1fff -#define OFFS_ALIGN 0xffffe000 - /* The union of reloc-type-classes where the reloc TYPE is a member. TYPE is in the class ELF_RTYPE_CLASS_PLT if it can describe a @@ -39,7 +37,7 @@ extern unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entr || ((type) == R_CRIS_GLOB_DAT)) * ELF_RTYPE_CLASS_PLT) \ | (((type) == R_CRIS_COPY) * ELF_RTYPE_CLASS_COPY)) -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_dynamic(void) { /* Don't just set this to an asm variable "r0" since that's not logical @@ -61,7 +59,7 @@ elf_machine_dynamic(void) there's some other symbol we could use, that we don't *have* to force a GOT entry for. 
*/ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_load_address(void) { Elf32_Addr gotaddr_diff; @@ -95,7 +93,7 @@ elf_machine_load_address(void) return gotaddr_diff; } -static __inline__ void +static __always_inline void elf_machine_relative(Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { diff --git a/ldso/ldso/cris/elfinterp.c b/ldso/ldso/cris/elfinterp.c index 7c71df83a..5ad302559 100644 --- a/ldso/ldso/cris/elfinterp.c +++ b/ldso/ldso/cris/elfinterp.c @@ -41,40 +41,32 @@ extern int _dl_linux_resolve(void); unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; int symtab_index; char *strtab; char *symname; char *new_addr; char *rel_addr; char **got_addr; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *this_reloc; unsigned long instr_addr; rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); - reloc_type = ELF32_R_TYPE(this_reloc->r_info); - symtab_index = ELF32_R_SYM(this_reloc->r_info); + symtab_index = ELF_R_SYM(this_reloc->r_info); - symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; - if (unlikely(reloc_type != R_CRIS_JUMP_SLOT)) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", - _dl_progname); - _dl_exit(1); - } - /* Address of the jump instruction to fix up. */ instr_addr = ((unsigned long)this_reloc->r_offset + (unsigned long)tpnt->loadaddr); got_addr = (char **)instr_addr; /* Get the address of the GOT entry. */ - new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); if (unlikely(!new_addr)) { _dl_dprintf(2, "%s: Can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); @@ -85,7 +77,7 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname); if (_dl_debug_detail) _dl_dprintf(_dl_debug_file, - "\n\tpatched: %x ==> %x @ %x", + "\n\tpatched: %x ==> %x @ %x\n", *got_addr, new_addr, got_addr); } if (!_dl_debug_nofixups) { @@ -99,28 +91,28 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc)(struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)) + int (*reloc_fnc)(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { int symtab_index; unsigned int i; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rpnt; /* Parse the relocation information. 
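The CRIS hunk above only changes elf_machine_relative's inline qualifier and does not show its body, but the contract is worth stating: the first DT_RELCOUNT entries of the dynamic relocation table are all R_*_RELATIVE, so they can be applied without any symbol lookup. On a conventional single-load-bias, RELA-based port such as CRIS a typical implementation looks roughly like the sketch below (an illustration, not the actual CRIS code):

static void
relative_sketch (Elf32_Addr load_off, const Elf32_Addr rel_addr,
		 Elf32_Word relative_count)
{
	Elf32_Rela *rpnt = (Elf32_Rela *) rel_addr;

	for (; relative_count; relative_count--, rpnt++) {
		Elf32_Addr *target = (Elf32_Addr *) (load_off + rpnt->r_offset);

		*target = load_off + rpnt->r_addend;
	}
}

This is also why _dl_fixup (further down in dl-elf.c) subtracts relative_count * sizeof(ELF_RELOC) from the relocation size before handing the remainder to _dl_parse_relocation_information.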
*/ rpnt = (ELF_RELOC *)(intptr_t)rel_addr; rel_size /= sizeof(ELF_RELOC); - symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; for (i = 0; i < rel_size; i++, rpnt++) { int res; - symtab_index = ELF32_R_SYM(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); debug_sym(symtab, strtab, symtab_index); debug_reloc(symtab, strtab, rpnt); @@ -138,7 +130,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, strtab + symtab[symtab_index].st_name); if (unlikely(res < 0)) { - int reloc_type = ELF32_R_TYPE(rpnt->r_info); + int reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) _dl_dprintf(2, "can't handle reloc type %s\n", @@ -158,8 +150,8 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; @@ -169,28 +161,35 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, #if defined (__SUPPORT_LD_DEBUG__) unsigned long old_val; #endif + struct symbol_ref sym_ref; reloc_addr = (unsigned long *)(intptr_t)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); - symtab_index = ELF32_R_SYM(rpnt->r_info); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); symbol_addr = 0; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symname = strtab + symtab[symtab_index].st_name; if (symtab_index) { if (symtab[symtab_index].st_shndx != SHN_UNDEF && - ELF32_ST_BIND(symtab[symtab_index].st_info) == STB_LOCAL) { + ELF_ST_BIND(symtab[symtab_index].st_info) == STB_LOCAL) { symbol_addr = (unsigned long)tpnt->loadaddr; } else { symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt, - elf_machine_type_class(reloc_type)); + elf_machine_type_class(reloc_type), &sym_ref); } - if (unlikely(!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) { + if (unlikely(!symbol_addr && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) { _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); } symbol_addr += rpnt->r_addend; + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } } #if defined (__SUPPORT_LD_DEBUG__) @@ -227,7 +226,7 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x", + _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif @@ -235,8 +234,8 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; unsigned long *reloc_addr; @@ -250,7 +249,7 @@ _dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, (void)strtab; reloc_addr = (unsigned long *)(intptr_t)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); + reloc_type = 
ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) old_val = *reloc_addr; @@ -268,7 +267,7 @@ _dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x", + _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif @@ -287,8 +286,9 @@ _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, int _dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/dl-debug.c b/ldso/ldso/dl-debug.c index 7ce8bfbce..88a48933c 100644 --- a/ldso/ldso/dl-debug.c +++ b/ldso/ldso/dl-debug.c @@ -104,3 +104,54 @@ static void debug_reloc(ElfW(Sym) *symtab, char *strtab, ELF_RELOC *rpnt) #define debug_reloc(symtab, strtab, rpnt) #endif /* __SUPPORT_LD_DEBUG__ */ + +#ifdef __LDSO_PRELINK_SUPPORT__ +static void +internal_function +_dl_debug_lookup (const char *undef_name, struct elf_resolve *undef_map, + const ElfW(Sym) *ref, struct symbol_ref *value, int type_class) +{ +#ifdef SHARED + if (_dl_trace_prelink) + { + int conflict = 0; + struct symbol_ref val = { ref, NULL }; + + if ((_dl_trace_prelink_map == NULL + || _dl_trace_prelink_map == _dl_loaded_modules) + && undef_map != _dl_loaded_modules) + { + _dl_find_hash(undef_name, &undef_map->symbol_scope, + undef_map, type_class, &val); + + if (val.sym != value->sym || val.tpnt != value->tpnt) + conflict = 1; + } + + if (unlikely(value->sym && ELF_ST_TYPE(value->sym->st_info) == STT_TLS)) + type_class = 4; + + if (conflict + || _dl_trace_prelink_map == undef_map + || _dl_trace_prelink_map == NULL + || type_class == 4) + { + _dl_dprintf (1, "%s %x %x -> %x %x ", + conflict ? "conflict" : "lookup", + (size_t) undef_map->mapaddr, + (size_t) (((ElfW(Addr)) ref) - undef_map->mapaddr), + (size_t) (value->tpnt ? value->tpnt->mapaddr : 0), + (size_t) (value->sym ? value->sym->st_value : 0)); + if (conflict) + _dl_dprintf (1, "x %x %x ", + (size_t) (val.tpnt ? val.tpnt->mapaddr : 0), + (size_t) (val.sym ? 
val.sym->st_value : 0)); + _dl_dprintf (1, "/%x %s\n", type_class, undef_name); + } +} +#endif +} + +#else +#define _dl_debug_lookup(undef_name, undef_map, ref, value, type_class) +#endif diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c index 6c0e80544..04e8c60a4 100644 --- a/ldso/ldso/dl-elf.c +++ b/ldso/ldso/dl-elf.c @@ -45,14 +45,14 @@ int _dl_map_cache(void) libentry_t *libent; int i, strtabsize; - if (_dl_cache_addr == (caddr_t) - 1) + if (_dl_cache_addr == MAP_FAILED) return -1; else if (_dl_cache_addr != NULL) return 0; if (_dl_stat(LDSO_CACHE, &st) - || (fd = _dl_open(LDSO_CACHE, O_RDONLY, 0)) < 0) { - _dl_cache_addr = (caddr_t) - 1; /* so we won't try again */ + || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) { + _dl_cache_addr = MAP_FAILED; /* so we won't try again */ return -1; } @@ -96,13 +96,13 @@ int _dl_map_cache(void) fail: _dl_munmap(_dl_cache_addr, _dl_cache_size); - _dl_cache_addr = (caddr_t) - 1; + _dl_cache_addr = MAP_FAILED; return -1; } int _dl_unmap_cache(void) { - if (_dl_cache_addr == NULL || _dl_cache_addr == (caddr_t) - 1) + if (_dl_cache_addr == NULL || _dl_cache_addr == MAP_FAILED) return -1; #if 1 @@ -119,8 +119,8 @@ void _dl_protect_relro (struct elf_resolve *l) { ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr); - ElfW(Addr) start = (base & ~(_dl_pagesize - 1)); - ElfW(Addr) end = ((base + l->relro_size) & ~(_dl_pagesize - 1)); + ElfW(Addr) start = (base & PAGE_ALIGN); + ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN); _dl_if_debug_dprint("RELRO protecting %s: start:%x, end:%x\n", l->libname, start, end); if (start != end && _dl_mprotect ((void *) start, end - start, PROT_READ) < 0) { @@ -132,64 +132,63 @@ _dl_protect_relro (struct elf_resolve *l) /* This function's behavior must exactly match that * in uClibc/ldso/util/ldd.c */ static struct elf_resolve * -search_for_named_library(const char *name, int secure, const char *path_list, - struct dyn_elf **rpnt) +search_for_named_library(const char *name, unsigned int rflags, const char *path_list, + struct dyn_elf **rpnt, const char* origin) { - char *path, *path_n, *mylibname; + char *mylibname; struct elf_resolve *tpnt; - int done; + const char *p, *pn; + int plen; if (path_list==NULL) return NULL; - /* We need a writable copy of this string, but we don't - * need this allocated permanently since we don't want - * to leak memory, so use alloca to put path on the stack */ - done = _dl_strlen(path_list); - path = alloca(done + 1); - /* another bit of local storage */ mylibname = alloca(2050); - /* gcc inlines alloca using a single instruction adjusting - * the stack pointer and no stack overflow check and thus - * no NULL error return. No point leaving in dead code... 
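The _dl_protect_relro hunk above replaces the open-coded ~(_dl_pagesize - 1) masks with the PAGE_ALIGN macro; both round *down* to a page boundary, so only whole pages are ever remapped read-only and a trailing partial page of the PT_GNU_RELRO region stays writable. A quick worked example with invented addresses, assuming 4 KiB pages (PAGE_ALIGN == ~0xfffUL):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x40012a40UL, relro_size = 0x5c0UL;
	unsigned long start = base & ~0xfffUL;                 /* 0x40012000 */
	unsigned long end   = (base + relro_size) & ~0xfffUL;  /* 0x40013000 */

	/* _dl_protect_relro would then issue:
	     mprotect((void *) start, end - start, PROT_READ);  */
	printf("protect [%#lx, %#lx) = %#lx bytes\n", start, end, end - start);
	return 0;
}

Had relro_size been 0x500 instead, end would round down to 0x40012000, equal to start, and nothing would be protected.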
*/ -#if 0 - if (!path || !mylibname) { - _dl_dprintf(2, "Out of memory!\n"); - _dl_exit(0); - } -#endif - - _dl_memcpy(path, path_list, done+1); - /* Unlike ldd.c, don't bother to eliminate double //s */ /* Replace colons with zeros in path_list */ /* : at the beginning or end of path maps to CWD */ /* :: anywhere maps CWD */ /* "" maps to CWD */ - done = 0; - path_n = path; - do { - if (*path == 0) { - *path = ':'; - done = 1; - } - if (*path == ':') { - *path = 0; - if (*path_n) - _dl_strcpy(mylibname, path_n); - else - _dl_strcpy(mylibname, "."); /* Assume current dir if empty path */ - _dl_strcat(mylibname, "/"); - _dl_strcat(mylibname, name); - if ((tpnt = _dl_load_elf_shared_library(secure, rpnt, mylibname)) != NULL) - return tpnt; - path_n = path+1; + for (p = path_list; p != NULL; p = pn) { + pn = _dl_strchr(p + 1, ':'); + if (pn != NULL) { + plen = pn - p; + pn++; + } else + plen = _dl_strlen(p); + + if (plen >= 7 && _dl_memcmp(p, "$ORIGIN", 7) == 0) { + int olen; + /* $ORIGIN is not expanded for SUID/GUID programs + (except if it is $ORIGIN alone) */ + if ((rflags & __RTLD_SECURE) && plen != 7) + continue; + if (origin == NULL) + continue; + for (olen = _dl_strlen(origin) - 1; olen >= 0 && origin[olen] != '/'; olen--) + ; + if (olen <= 0) + continue; + _dl_memcpy(&mylibname[0], origin, olen); + _dl_memcpy(&mylibname[olen], p + 7, plen - 7); + mylibname[olen + plen - 7] = 0; + } else if (plen != 0) { + _dl_memcpy(mylibname, p, plen); + mylibname[plen] = 0; + } else { + _dl_strcpy(mylibname, "."); } - path++; - } while (!done); + _dl_strcat(mylibname, "/"); + _dl_strcat(mylibname, name); +#ifdef __LDSO_SAFE_RUNPATH__ + if (*mylibname == '/') +#endif + if ((tpnt = _dl_load_elf_shared_library(rflags, rpnt, mylibname)) != NULL) + return tpnt; + } return NULL; } @@ -197,8 +196,8 @@ search_for_named_library(const char *name, int secure, const char *path_list, unsigned long _dl_error_number; unsigned long _dl_internal_error_number; -struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt, - struct elf_resolve *tpnt, char *full_libname, int __attribute__((unused)) trace_loaded_objects) +struct elf_resolve *_dl_load_shared_library(unsigned int rflags, struct dyn_elf **rpnt, + struct elf_resolve *tpnt, char *full_libname, int attribute_unused trace_loaded_objects) { char *pnt; struct elf_resolve *tpnt1; @@ -226,7 +225,7 @@ struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt, if (libname != full_libname) { _dl_if_debug_dprint("\ttrying file='%s'\n", full_libname); - tpnt1 = _dl_load_elf_shared_library(secure, rpnt, full_libname); + tpnt1 = _dl_load_elf_shared_library(rflags, rpnt, full_libname); if (tpnt1) { return tpnt1; } @@ -241,20 +240,23 @@ struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt, if (pnt) { pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB]; _dl_if_debug_dprint("\tsearching RPATH='%s'\n", pnt); - if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL) + if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt, + tpnt->libname)) != NULL) return tpnt1; + } #endif +#ifdef __LDSO_LD_LIBRARY_PATH__ /* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */ if (_dl_library_path) { _dl_if_debug_dprint("\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path); - if ((tpnt1 = search_for_named_library(libname, secure, _dl_library_path, rpnt)) != NULL) + if ((tpnt1 = search_for_named_library(libname, rflags, _dl_library_path, rpnt, NULL)) != NULL) { return tpnt1; } } - +#endif /* 
* The ABI specifies that RUNPATH is searched after LD_LIBRARY_PATH. */ @@ -263,9 +265,21 @@ struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt, if (pnt) { pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB]; _dl_if_debug_dprint("\tsearching RUNPATH='%s'\n", pnt); - if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL) + if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt, NULL)) != NULL) return tpnt1; } +#ifdef __LDSO_RUNPATH_OF_EXECUTABLE__ + /* + * Try the DT_RPATH of the executable itself. + */ + pnt = (char *) _dl_loaded_modules->dynamic_info[DT_RPATH]; + if (pnt) { + pnt += (unsigned long) _dl_loaded_modules->dynamic_info[DT_STRTAB]; + _dl_if_debug_dprint("\tsearching exe's RPATH='%s'\n", pnt); + if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt, NULL)) != NULL) + return tpnt1; + } +#endif #endif /* @@ -274,7 +288,7 @@ struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt, * the hard coded paths that follow (i.e before /lib and /usr/lib). */ #ifdef __LDSO_CACHE_SUPPORT__ - if (_dl_cache_addr != NULL && _dl_cache_addr != (caddr_t) - 1) { + if (_dl_cache_addr != NULL && _dl_cache_addr != MAP_FAILED) { int i; header_t *header = (header_t *) _dl_cache_addr; libentry_t *libent = (libentry_t *) & header[1]; @@ -282,40 +296,69 @@ struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt, _dl_if_debug_dprint("\tsearching cache='%s'\n", LDSO_CACHE); for (i = 0; i < header->nlibs; i++) { - if ((libent[i].flags == LIB_ELF || - libent[i].flags == LIB_ELF_LIBC0 || - libent[i].flags == LIB_ELF_LIBC5) && - _dl_strcmp(libname, strs + libent[i].sooffset) == 0 && - (tpnt1 = _dl_load_elf_shared_library(secure, - rpnt, strs + libent[i].liboffset))) + if ((libent[i].flags == LIB_ELF + || libent[i].flags == LIB_ELF_LIBC0 + || libent[i].flags == LIB_ELF_LIBC5) + && _dl_strcmp(libname, strs + libent[i].sooffset) == 0 + && (tpnt1 = _dl_load_elf_shared_library(rflags, rpnt, strs + libent[i].liboffset)) + ) { return tpnt1; + } } } #endif - +#if defined SHARED && defined __LDSO_SEARCH_INTERP_PATH__ /* Look for libraries wherever the shared library loader * was installed */ _dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath); - if ((tpnt1 = search_for_named_library(libname, secure, _dl_ldsopath, rpnt)) != NULL) - { + tpnt1 = search_for_named_library(libname, rflags, _dl_ldsopath, rpnt, NULL); + if (tpnt1 != NULL) return tpnt1; - } - - +#endif /* Lastly, search the standard list of paths for the library. This list must exactly match the list in uClibc/ldso/util/ldd.c */ _dl_if_debug_dprint("\tsearching full lib path list\n"); - if ((tpnt1 = search_for_named_library(libname, secure, + tpnt1 = search_for_named_library(libname, rflags, UCLIBC_RUNTIME_PREFIX "lib:" UCLIBC_RUNTIME_PREFIX "usr/lib" #ifndef __LDSO_CACHE_SUPPORT__ ":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib" #endif - , rpnt) - ) != NULL) - { + , rpnt, NULL); + if (tpnt1 != NULL) return tpnt1; + +#ifdef __LDSO_RUNPATH_OF_EXECUTABLE__ + /* Very last resort, try the executable's DT_RUNPATH and DT_RPATH */ + /* http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#shobj_dependencies + * The set of directories specified by a given DT_RUNPATH entry is + * used to find only the immediate dependencies of the executable or + * shared object containing the DT_RUNPATH entry. 
That is, it is + * used only for those dependencies contained in the DT_NEEDED + * entries of the dynamic structure containing the DT_RUNPATH entry, + * itself. One object's DT_RUNPATH entry does not affect the search + * for any other object's dependencies. + * + * glibc (around 2.19) violates this and the usual suspects are + * abusing this bug^Wrelaxed, user-friendly behaviour. + */ + + pnt = (char *) _dl_loaded_modules->dynamic_info[DT_RUNPATH]; + if (pnt) { + pnt += (unsigned long) _dl_loaded_modules->dynamic_info[DT_STRTAB]; + _dl_if_debug_dprint("\tsearching exe's RUNPATH='%s'\n", pnt); + if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt, NULL)) != NULL) + return tpnt1; } + pnt = (char *) _dl_loaded_modules->dynamic_info[DT_RPATH]; + if (pnt) { + pnt += (unsigned long) _dl_loaded_modules->dynamic_info[DT_STRTAB]; + _dl_if_debug_dprint("\tsearching exe's RPATH='%s'\n", pnt); + if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt, NULL)) != NULL) + return tpnt1; + } +#endif + goof: /* Well, we shot our wad on that one. All we can do now is punt */ @@ -327,6 +370,124 @@ goof: return NULL; } +/* Define the _dl_library_offset for the architectures that need it */ +DL_DEF_LIB_OFFSET; + +/* + * Make a writeable mapping of a segment, regardless of whether PF_W is + * set or not. + */ +static void * +map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags, + unsigned long libaddr) +{ + int prot_flags = ppnt->p_flags | PF_W; + char *status, *retval; + char *tryaddr; + ssize_t size; + unsigned long map_size; + char *cpnt; + char *piclib2map = NULL; + + if (piclib == 2 && + /* We might be able to avoid this call if memsz doesn't + require an additional page, but this would require mmap + to always return page-aligned addresses and a whole + number of pages allocated. Unfortunately on uClinux + may return misaligned addresses and may allocate + partial pages, so we may end up doing unnecessary mmap + calls. + + This is what we could do if we knew mmap would always + return aligned pages: + + ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) & + PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz) + + Instead, we have to do this: */ + ppnt->p_filesz < ppnt->p_memsz) + { + piclib2map = (char *) + _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz, + LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0); + if (_dl_mmap_check_error(piclib2map)) + return 0; + } + + tryaddr = piclib == 2 ? piclib2map + : ((char *) (piclib ? libaddr : DL_GET_LIB_OFFSET()) + + (ppnt->p_vaddr & PAGE_ALIGN)); + + size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz; + + /* For !MMU, mmap to fixed address will fail. + So instead of desperately call mmap and fail, + we set status to MAP_FAILED to save a call + to mmap (). */ +#ifndef __ARCH_USE_MMU__ + if (piclib2map == 0) +#endif + status = (char *) _dl_mmap + (tryaddr, size, LXFLAGS(prot_flags), + flags | (piclib2map ? MAP_FIXED : 0), + infile, ppnt->p_offset & OFFS_ALIGN); +#ifndef __ARCH_USE_MMU__ + else + status = MAP_FAILED; +#endif +#ifdef _DL_PREAD + if (_dl_mmap_check_error(status) && piclib2map + && (_DL_PREAD (infile, tryaddr, size, + ppnt->p_offset & OFFS_ALIGN) == size)) + status = tryaddr; +#endif + if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status)) + return 0; + + if (piclib2map) + retval = piclib2map; + else + retval = status; + + /* Now we want to allocate and zero-out any data from the end + of the region we mapped in from the file (filesz) to the + end of the loadable segment (memsz). 
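Backing up to the rewritten search_for_named_library: the $ORIGIN branch replaces the token with everything up to (but not including) the last '/' of the requesting object's path. A hypothetical worked example, with all paths and names invented for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *origin  = "/opt/app/lib/libdep.so"; /* requesting object  */
	const char *entry   = "$ORIGIN/../private";     /* one RPATH element  */
	const char *libname = "libfoo.so";
	char buf[2050];
	int olen = strlen(origin) - 1;

	while (olen >= 0 && origin[olen] != '/')
		olen--;                    /* index of the last '/': 12         */
	memcpy(buf, origin, olen);         /* "/opt/app/lib"                    */
	strcpy(buf + olen, entry + 7);     /* + "/../private"                   */
	strcat(buf, "/");
	strcat(buf, libname);              /* + "libfoo.so"                     */
	printf("%s\n", buf);   /* /opt/app/lib/../private/libfoo.so            */
	return 0;
}

The resulting name is handed straight to _dl_load_elf_shared_library(); with __RTLD_SECURE set, an element containing $ORIGIN is skipped entirely unless it is exactly "$ORIGIN".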
We may need + additional pages for memsz, that we map in below, and we + can count on the kernel to zero them out, but we have to + zero out stuff in the last page that we mapped in from the + file. However, we can't assume to have actually obtained + full pages from the kernel, since we didn't ask for them, + and uClibc may not give us full pages for small + allocations. So only zero out up to memsz or the end of + the page, whichever comes first. */ + + /* CPNT is the beginning of the memsz portion not backed by + filesz. */ + cpnt = (char *) (status + size); + + /* MAP_SIZE is the address of the + beginning of the next page. */ + map_size = (ppnt->p_vaddr + ppnt->p_filesz + + ADDR_ALIGN) & PAGE_ALIGN; + + _dl_memset (cpnt, 0, + MIN (map_size + - (ppnt->p_vaddr + + ppnt->p_filesz), + ppnt->p_memsz + - ppnt->p_filesz)); + + if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) { + tryaddr = map_size + (char*)(piclib ? libaddr : 0); + status = (char *) _dl_mmap(tryaddr, + ppnt->p_vaddr + ppnt->p_memsz - map_size, + LXFLAGS(prot_flags), + flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + if (_dl_mmap_check_error(status) || tryaddr != status) + return NULL; + } + return retval; +} /* * Read one ELF library into memory, mmap it into the correct locations and @@ -334,23 +495,28 @@ goof: * are required. */ -struct elf_resolve *_dl_load_elf_shared_library(int secure, - struct dyn_elf **rpnt, char *libname) +struct elf_resolve *_dl_load_elf_shared_library(unsigned int rflags, + struct dyn_elf **rpnt, const char *libname) { ElfW(Ehdr) *epnt; unsigned long dynamic_addr = 0; ElfW(Dyn) *dpnt; struct elf_resolve *tpnt; ElfW(Phdr) *ppnt; +#if defined(USE_TLS) && USE_TLS + ElfW(Phdr) *tlsppnt = NULL; +#endif char *status, *header; unsigned long dynamic_info[DYNAMIC_SIZE]; unsigned long *lpnt; unsigned long libaddr; unsigned long minvma = 0xffffffff, maxvma = 0; + unsigned int rtld_flags; int i, flags, piclib, infile; ElfW(Addr) relro_addr = 0; size_t relro_size = 0; struct stat st; + uint32_t *p32; DL_LOADADDR_TYPE lib_loadaddr; DL_INIT_LOADADDR_EXTRA_DECLS @@ -366,13 +532,14 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, _dl_close(infile); return NULL; } - /* If we are in secure mode (i.e. a setu/gid binary using LD_PRELOAD), + /* If we are in secure mode (i.e. a setuid/gid binary using LD_PRELOAD), we don't load the library if it isn't setuid. 
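Just below, the four byte-by-byte e_ident comparisons are collapsed into a single 32-bit compare against ELFMAG_U32. That constant is simply "\x7fELF" read as a uint32_t in host byte order (the macro itself is presumably defined per-endianness elsewhere in the uClibc headers); a quick sanity check:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char ident[4] = { 0x7f, 'E', 'L', 'F' };
	uint32_t v;

	memcpy(&v, ident, sizeof(v));
	/* Prints 0x464c457f on a little-endian host, 0x7f454c46 on big-endian. */
	printf("%#x\n", v);
	return 0;
}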
*/ - if (secure) + if (rflags & __RTLD_SECURE) { if (!(st.st_mode & S_ISUID)) { _dl_close(infile); return NULL; } + } /* Check if file is already loaded */ for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) { @@ -383,8 +550,12 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, return tpnt; } } + if (rflags & RTLD_NOLOAD) { + _dl_close(infile); + return NULL; + } header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0); if (_dl_mmap_check_error(header)) { _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname); _dl_internal_error_number = LD_ERROR_MMAP_FAILED; @@ -394,11 +565,8 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, _dl_read(infile, header, _dl_pagesize); epnt = (ElfW(Ehdr) *) (intptr_t) header; - if (epnt->e_ident[0] != 0x7f || - epnt->e_ident[1] != 'E' || - epnt->e_ident[2] != 'L' || - epnt->e_ident[3] != 'F') - { + p32 = (uint32_t*)&epnt->e_ident; + if (*p32 != ELFMAG_U32) { _dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname, libname); _dl_internal_error_number = LD_ERROR_NOTELF; @@ -407,11 +575,15 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, return NULL; } - if ((epnt->e_type != ET_DYN) || (epnt->e_machine != MAGIC1 + if ((epnt->e_type != ET_DYN +#ifdef __LDSO_STANDALONE_SUPPORT__ + && epnt->e_type != ET_EXEC +#endif + ) || (epnt->e_machine != MAGIC1 #ifdef MAGIC2 && epnt->e_machine != MAGIC2 #endif - )) + )) { _dl_internal_error_number = (epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC); @@ -436,7 +608,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, if (ppnt->p_type == PT_LOAD) { /* See if this is a PIC library. */ - if (i == 0 && ppnt->p_vaddr > 0x1000000) { + if (minvma == 0xffffffff && ppnt->p_vaddr > 0x1000000) { piclib = 0; minvma = ppnt->p_vaddr; } @@ -447,22 +619,48 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, maxvma = ppnt->p_vaddr + ppnt->p_memsz; } } + if (ppnt->p_type == PT_TLS) { +#if defined(USE_TLS) && USE_TLS + if (ppnt->p_memsz == 0) + /* Nothing to do for an empty segment. */ + continue; + else + /* Save for after 'tpnt' is actually allocated. */ + tlsppnt = ppnt; +#else + /* + * Yup, the user was an idiot and tried to sneak in a library with + * TLS in it and we don't support it. Let's fall on our own sword + * and scream at the luser while we die. + */ + _dl_dprintf(2, "%s: '%s' library contains unsupported TLS\n", + _dl_progname, libname); + _dl_internal_error_number = LD_ERROR_TLS_FAILED; + _dl_close(infile); + _dl_munmap(header, _dl_pagesize); + return NULL; +#endif + } ppnt++; } +#ifdef __LDSO_STANDALONE_SUPPORT__ + if (epnt->e_type == ET_EXEC) + piclib = 0; +#endif + DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname); - maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN; - minvma = minvma & ~0xffffU; + maxvma = (maxvma + ADDR_ALIGN) & PAGE_ALIGN; + minvma = minvma & ~ADDR_ALIGN; flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ; - if (!piclib) - flags |= MAP_FIXED; if (piclib == 0 || piclib == 1) { status = (char *) _dl_mmap((char *) (piclib ? 
0 : minvma), maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0); if (_dl_mmap_check_error(status)) { + cant_map: _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname); _dl_internal_error_number = LD_ERROR_MMAP_FAILED; _dl_close(infile); @@ -476,15 +674,20 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, /* Get the memory to store the library */ ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff]; - DL_INIT_LOADADDR(lib_loadaddr, libaddr, ppnt, epnt->e_phnum); + DL_INIT_LOADADDR(lib_loadaddr, libaddr - minvma, ppnt, epnt->e_phnum); + /* Set _dl_library_offset to lib_loadaddr or 0. */ + DL_SET_LIB_OFFSET(lib_loadaddr); for (i = 0; i < epnt->e_phnum; i++) { if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) { char *addr; addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags); - if (addr == NULL) + if (addr == NULL) { + cant_map1: + DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma); goto cant_map; + } DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt); ppnt++; @@ -498,152 +701,14 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, char *tryaddr; ssize_t size; - /* See if this is a PIC library. */ - if (i == 0 && ppnt->p_vaddr > 0x1000000) { - piclib = 0; - /* flags |= MAP_FIXED; */ - } - if (ppnt->p_flags & PF_W) { - unsigned long map_size; - char *cpnt; - char *piclib2map = 0; - - if (piclib == 2 && - /* We might be able to avoid this - call if memsz doesn't require - an additional page, but this - would require mmap to always - return page-aligned addresses - and a whole number of pages - allocated. Unfortunately on - uClinux may return misaligned - addresses and may allocate - partial pages, so we may end up - doing unnecessary mmap calls. - - This is what we could do if we - knew mmap would always return - aligned pages: - - ((ppnt->p_vaddr + ppnt->p_filesz - + ADDR_ALIGN) - & PAGE_ALIGN) - < ppnt->p_vaddr + ppnt->p_memsz) - - Instead, we have to do this: */ - ppnt->p_filesz < ppnt->p_memsz) - { - piclib2map = (char *) - _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) - + ppnt->p_memsz, - LXFLAGS(ppnt->p_flags), - flags | MAP_ANONYMOUS, -1, 0); - if (_dl_mmap_check_error(piclib2map)) - goto cant_map; - DL_INIT_LOADADDR_HDR - (lib_loadaddr, piclib2map - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); - } - - tryaddr = piclib == 2 ? piclib2map - : ((char*) (piclib ? libaddr : 0) + - (ppnt->p_vaddr & PAGE_ALIGN)); - - size = (ppnt->p_vaddr & ADDR_ALIGN) - + ppnt->p_filesz; - - /* For !MMU, mmap to fixed address will fail. - So instead of desperately call mmap and fail, - we set status to MAP_FAILED to save a call - to mmap (). */ -#ifndef __ARCH_USE_MMU__ - if (piclib2map == 0) -#endif - status = (char *) _dl_mmap - (tryaddr, size, LXFLAGS(ppnt->p_flags), - flags | (piclib2map ? MAP_FIXED : 0), - infile, ppnt->p_offset & OFFS_ALIGN); -#ifndef __ARCH_USE_MMU__ - else - status = MAP_FAILED; -#endif -#ifdef _DL_PREAD - if (_dl_mmap_check_error(status) && piclib2map - && (_DL_PREAD (infile, tryaddr, size, - ppnt->p_offset & OFFS_ALIGN) - == size)) - status = tryaddr; -#endif - if (_dl_mmap_check_error(status) - || (tryaddr && tryaddr != status)) { - cant_map: - _dl_dprintf(2, "%s:%i: can't map '%s'\n", - _dl_progname, __LINE__, libname); - _dl_internal_error_number = LD_ERROR_MMAP_FAILED; - DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma); - _dl_close(infile); - _dl_munmap(header, _dl_pagesize); - return NULL; - } - - if (! 
piclib2map) { - DL_INIT_LOADADDR_HDR - (lib_loadaddr, status - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); - } - /* Now we want to allocate and - zero-out any data from the end of - the region we mapped in from the - file (filesz) to the end of the - loadable segment (memsz). We may - need additional pages for memsz, - that we map in below, and we can - count on the kernel to zero them - out, but we have to zero out stuff - in the last page that we mapped in - from the file. However, we can't - assume to have actually obtained - full pages from the kernel, since - we didn't ask for them, and uClibc - may not give us full pages for - small allocations. So only zero - out up to memsz or the end of the - page, whichever comes first. */ - - /* CPNT is the beginning of the memsz - portion not backed by filesz. */ - cpnt = (char *) (status + size); - - /* MAP_SIZE is the address of the - beginning of the next page. */ - map_size = (ppnt->p_vaddr + ppnt->p_filesz - + ADDR_ALIGN) & PAGE_ALIGN; - -#ifndef MIN -# define MIN(a,b) ((a) < (b) ? (a) : (b)) -#endif - _dl_memset (cpnt, 0, - MIN (map_size - - (ppnt->p_vaddr - + ppnt->p_filesz), - ppnt->p_memsz - - ppnt->p_filesz)); - - if (map_size < ppnt->p_vaddr + ppnt->p_memsz - && !piclib2map) { - tryaddr = map_size + (char*)(piclib ? libaddr : 0); - status = (char *) _dl_mmap(tryaddr, - ppnt->p_vaddr + ppnt->p_memsz - map_size, - LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0); - if (_dl_mmap_check_error(status) - || tryaddr != status) - goto cant_map; - } + status = map_writeable (infile, ppnt, piclib, flags, libaddr); + if (status == NULL) + goto cant_map1; } else { tryaddr = (piclib == 2 ? 0 : (char *) (ppnt->p_vaddr & PAGE_ALIGN) - + (piclib ? libaddr : 0)); + + (piclib ? libaddr : DL_GET_LIB_OFFSET())); size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz; status = (char *) _dl_mmap (tryaddr, size, LXFLAGS(ppnt->p_flags), @@ -652,11 +717,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, infile, ppnt->p_offset & OFFS_ALIGN); if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status)) - goto cant_map; - DL_INIT_LOADADDR_HDR - (lib_loadaddr, status - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); + goto cant_map1; } + DL_INIT_LOADADDR_HDR(lib_loadaddr, + status + (ppnt->p_vaddr & ADDR_ALIGN), + ppnt); /* if (libaddr == 0 && piclib) { libaddr = (unsigned long) status; @@ -665,10 +730,16 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, } ppnt++; } - _dl_close(infile); - /* For a non-PIC library, the addresses are all absolute */ + /* + * The dynamic_addr must be take into acount lib_loadaddr value, to note + * it is zero when the SO has been mapped to the elf's physical addr + */ +#ifdef __LDSO_PRELINK_SUPPORT__ + if (DL_GET_LIB_OFFSET()) { +#else if (piclib) { +#endif dynamic_addr = (unsigned long) DL_RELOC_ADDR(lib_loadaddr, dynamic_addr); } @@ -684,12 +755,13 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n", _dl_progname, libname); _dl_munmap(header, _dl_pagesize); + _dl_close(infile); return NULL; } dpnt = (ElfW(Dyn) *) dynamic_addr; _dl_memset(dynamic_info, 0, sizeof(dynamic_info)); - _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr); + rtld_flags = _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr); /* If the TEXTREL is set, this means that we need to make the pages writable before we perform relocations. Do this now. They get set back again later. 
*/ @@ -698,32 +770,103 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, #ifndef __FORCE_SHAREABLE_TEXT_SEGMENTS__ ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff]; for (i = 0; i < epnt->e_phnum; i++, ppnt++) { - if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) - _dl_mprotect((void *) ((piclib ? libaddr : 0) + + if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) { +#ifdef __ARCH_USE_MMU__ + _dl_mprotect((void *) ((piclib ? libaddr : DL_GET_LIB_OFFSET()) + (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz, PROT_READ | PROT_WRITE | PROT_EXEC); +#else + void *new_addr; + new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr); + if (!new_addr) { + _dl_dprintf(2, "Can't modify %s's text section.", + libname); + _dl_exit(1); + } + DL_UPDATE_LOADADDR_HDR(lib_loadaddr, + new_addr + (ppnt->p_vaddr & ADDR_ALIGN), + ppnt); + /* This has invalidated all pointers into the previously readonly segment. + Update any them to point into the remapped segment. */ + _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr); +#endif + } } #else - _dl_dprintf(_dl_debug_file, "Can't modify %s's text section. Use GCC option -fPIC for shared objects, please.\n",libname); + _dl_dprintf(2, "Can't modify %s's text section." + " Use GCC option -fPIC for shared objects, please.\n", + libname); _dl_exit(1); #endif } + _dl_close(infile); + tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info, dynamic_addr, 0); + tpnt->mapaddr = libaddr; tpnt->relro_addr = relro_addr; tpnt->relro_size = relro_size; tpnt->st_dev = st.st_dev; tpnt->st_ino = st.st_ino; - tpnt->ppnt = (ElfW(Phdr) *) DL_RELOC_ADDR(tpnt->loadaddr, epnt->e_phoff); + tpnt->ppnt = (ElfW(Phdr) *) + DL_RELOC_ADDR(DL_GET_RUN_ADDR(tpnt->loadaddr, tpnt->mapaddr), + epnt->e_phoff); tpnt->n_phent = epnt->e_phnum; + tpnt->rtld_flags = rflags | rtld_flags; +#ifdef __LDSO_STANDALONE_SUPPORT__ + tpnt->l_entry = epnt->e_entry; +#endif + +#if defined(USE_TLS) && USE_TLS + if (tlsppnt) { + _dl_debug_early("Found TLS header for %s\n", libname); +# if NO_TLS_OFFSET != 0 + tpnt->l_tls_offset = NO_TLS_OFFSET; +# endif + tpnt->l_tls_blocksize = tlsppnt->p_memsz; + tpnt->l_tls_align = tlsppnt->p_align; + if (tlsppnt->p_align == 0) + tpnt->l_tls_firstbyte_offset = 0; + else + tpnt->l_tls_firstbyte_offset = tlsppnt->p_vaddr & + (tlsppnt->p_align - 1); + tpnt->l_tls_initimage_size = tlsppnt->p_filesz; + tpnt->l_tls_initimage = (void *) tlsppnt->p_vaddr; + + /* Assign the next available module ID. */ + tpnt->l_tls_modid = _dl_next_tls_modid (); + + /* We know the load address, so add it to the offset. 
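The PT_TLS handling above records per-module TLS metadata straight from the program header. A worked example with invented segment values shows what ends up in the link map:

#include <stdio.h>

int main(void)
{
	/* Hypothetical PT_TLS header: p_vaddr 0x1a2c, p_filesz 0x10,
	   p_memsz 0x30, p_align 8.  */
	unsigned long p_vaddr = 0x1a2c, p_filesz = 0x10,
		      p_memsz = 0x30, p_align = 8;

	printf("l_tls_blocksize        = %#lx\n", p_memsz);                 /* 0x30 */
	printf("l_tls_align            = %lu\n",  p_align);                 /* 8    */
	printf("l_tls_firstbyte_offset = %#lx\n", p_vaddr & (p_align - 1)); /* 0x4  */
	printf("zero-filled tail       = %#lx\n", p_memsz - p_filesz);      /* 0x20 */
	return 0;
}

l_tls_initimage starts out as the link-time p_vaddr and, as the lines that follow show, only has the load address added once that is known (and, in the standalone-ldso case, only for position-independent objects).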
*/ +#ifdef __LDSO_STANDALONE_SUPPORT__ + if ((tpnt->l_tls_initimage != NULL) && piclib) +#else + if (tpnt->l_tls_initimage != NULL) +#endif + { +# ifdef __SUPPORT_LD_DEBUG_EARLY__ + char *tmp = (char *) tpnt->l_tls_initimage; + tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr; + _dl_debug_early("Relocated TLS initial image from %x to %x (size = %x)\n", tmp, tpnt->l_tls_initimage, tpnt->l_tls_initimage_size); + tmp = 0; +# else + tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr; +# endif + } + } +#endif /* * Add this object into the symbol chain */ - if (*rpnt) { - (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf)); + if (*rpnt +#ifdef __LDSO_STANDALONE_SUPPORT__ + /* Do not create a new chain entry for the main executable */ + && (*rpnt)->dyn +#endif + ) { + (*rpnt)->next = _dl_malloc(sizeof(struct dyn_elf)); _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf)); (*rpnt)->next->prev = (*rpnt); *rpnt = (*rpnt)->next; @@ -734,14 +877,17 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, * and initialize the _dl_symbol_table. */ else { - *rpnt = _dl_symbol_tables = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf)); + *rpnt = _dl_symbol_tables = _dl_malloc(sizeof(struct dyn_elf)); _dl_memset(*rpnt, 0, sizeof(struct dyn_elf)); } #endif (*rpnt)->dyn = tpnt; - tpnt->symbol_scope = _dl_symbol_tables; tpnt->usage_count++; +#ifdef __LDSO_STANDALONE_SUPPORT__ + tpnt->libtype = (epnt->e_type == ET_DYN) ? elf_lib : elf_executable; +#else tpnt->libtype = elf_lib; +#endif /* * OK, the next thing we need to do is to insert the dynamic linker into @@ -756,6 +902,76 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, INIT_GOT(lpnt, tpnt); } +#ifdef __DSBT__ + /* Handle DSBT initialization */ + { + struct elf_resolve *t, *ref; + int idx = tpnt->dsbt_index; + void **dsbt = tpnt->dsbt_table; + + /* + * It is okay (required actually) to have zero idx for an executable. + * This is the case when running ldso standalone and the program + * is being mapped in via _dl_load_shared_library(). + */ + if (idx == 0 && tpnt->libtype != elf_executable) { + if (!dynamic_info[DT_TEXTREL]) { + /* This DSO has not been assigned an index. */ + _dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n", + _dl_progname, libname); + _dl_exit(1); + } + /* Find a dsbt table from another module. */ + ref = NULL; + for (t = _dl_loaded_modules; t; t = t->next) { + if (ref == NULL && t != tpnt) { + ref = t; + break; + } + } + idx = tpnt->dsbt_size; + while (idx-- > 0) + if (!ref || ref->dsbt_table[idx] == NULL) + break; + if (idx <= 0) { + _dl_dprintf(2, "%s: '%s' caused DSBT table overflow!\n", + _dl_progname, libname); + _dl_exit(1); + } + _dl_if_debug_dprint("\n\tfile='%s'; assigned index %d\n", + libname, idx); + tpnt->dsbt_index = idx; + } + + /* make sure index is not already used */ + if (_dl_ldso_dsbt[idx]) { + struct elf_resolve *dup; + const char *dup_name; + + for (dup = _dl_loaded_modules; dup; dup = dup->next) + if (dup != tpnt && dup->dsbt_index == idx) + break; + if (dup) + dup_name = dup->libname; + else if (idx == 1) + dup_name = "runtime linker"; + else + dup_name = "unknown library"; + _dl_dprintf(2, "%s: '%s' dsbt index %d already used by %s!\n", + _dl_progname, libname, idx, dup_name); + _dl_exit(1); + } + + /* + * Setup dsbt slot for this module in dsbt of all modules. 
+ */ + for (t = _dl_loaded_modules; t; t = t->next) + t->dsbt_table[idx] = dsbt; + _dl_ldso_dsbt[idx] = dsbt; + _dl_memcpy(dsbt, _dl_ldso_dsbt, + tpnt->dsbt_size * sizeof(tpnt->dsbt_table[0])); + } +#endif _dl_if_debug_dprint("\n\tfile='%s'; generating link map\n", libname); _dl_if_debug_dprint("\t\tdynamic: %x base: %x\n", dynamic_addr, DL_LOADADDR_BASE(lib_loadaddr)); _dl_if_debug_dprint("\t\t entry: %x phdr: %x phnum: %x\n\n", @@ -767,7 +983,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, } /* now_flag must be RTLD_NOW or zero */ -int _dl_fixup(struct dyn_elf *rpnt, int now_flag) +int _dl_fixup(struct dyn_elf *rpnt, struct r_scope_elem *scope, int now_flag) { int goof = 0; struct elf_resolve *tpnt; @@ -775,7 +991,7 @@ int _dl_fixup(struct dyn_elf *rpnt, int now_flag) ElfW(Addr) reloc_addr; if (rpnt->next) - goof = _dl_fixup(rpnt->next, now_flag); + goof = _dl_fixup(rpnt->next, scope, now_flag); if (goof) return goof; tpnt = rpnt->dyn; @@ -802,10 +1018,13 @@ int _dl_fixup(struct dyn_elf *rpnt, int now_flag) relative_count = tpnt->dynamic_info[DT_RELCONT_IDX]; if (relative_count) { /* Optimize the XX_RELATIVE relocations if possible */ reloc_size -= relative_count * sizeof(ELF_RELOC); - elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count); +#ifdef __LDSO_PRELINK_SUPPORT__ + if (tpnt->loadaddr || (!tpnt->dynamic_info[DT_GNU_PRELINKED_IDX])) +#endif + elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count); reloc_addr += relative_count * sizeof(ELF_RELOC); } - goof += _dl_parse_relocation_information(rpnt, + goof += _dl_parse_relocation_information(rpnt, scope, reloc_addr, reloc_size); tpnt->init_flag |= RELOCS_DONE; @@ -821,15 +1040,26 @@ int _dl_fixup(struct dyn_elf *rpnt, int now_flag) tpnt->dynamic_info[DT_JMPREL], tpnt->dynamic_info [DT_PLTRELSZ]); } else { - goof += _dl_parse_relocation_information(rpnt, + goof += _dl_parse_relocation_information(rpnt, scope, tpnt->dynamic_info[DT_JMPREL], tpnt->dynamic_info[DT_PLTRELSZ]); } tpnt->init_flag |= JMP_RELOCS_DONE; } + +#if 0 +/* _dl_add_to_slotinfo is called by init_tls() for initial DSO + or by dlopen() for dynamically loaded DSO. */ +#if defined(USE_TLS) && USE_TLS + /* Add object to slot information data if necessasy. */ + if (tpnt->l_tls_blocksize != 0 && tls_init_tp_called) + _dl_add_to_slotinfo ((struct link_map *) tpnt); +#endif +#endif return goof; } +#ifdef IS_IN_rtld /* Minimal printf which handles only %s, %d, and %x */ void _dl_dprintf(int fd, const char *fmt, ...) { @@ -840,7 +1070,7 @@ void _dl_dprintf(int fd, const char *fmt, ...) #endif va_list args; char *start, *ptr, *string; - static char *buf; + char *buf; if (!fmt) return; @@ -895,7 +1125,7 @@ void _dl_dprintf(int fd, const char *fmt, ...) break; } case 'x': - case 'X': + case 'p': { char tmp[22]; #if __WORDSIZE > 32 @@ -932,35 +1162,10 @@ char *_dl_strdup(const char *string) _dl_strcpy(retval, string); return retval; } +#endif -void _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], - void *debug_addr, DL_LOADADDR_TYPE load_off) -{ - __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off); -} - -/* we want this in ldso.so and libdl.a but nowhere else */ -#ifdef __USE_GNU -#if defined IS_IN_rtld || (defined IS_IN_libdl && ! 
defined SHARED) -extern __typeof(dl_iterate_phdr) __dl_iterate_phdr; -int -__dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info, size_t size, void *data), void *data) +unsigned int _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], + void *debug_addr, DL_LOADADDR_TYPE load_off) { - struct elf_resolve *l; - struct dl_phdr_info info; - int ret = 0; - - for (l = _dl_loaded_modules; l != NULL; l = l->next) { - info.dlpi_addr = l->loadaddr; - info.dlpi_name = l->libname; - info.dlpi_phdr = l->ppnt; - info.dlpi_phnum = l->n_phent; - ret = callback (&info, sizeof (struct dl_phdr_info), data); - if (ret) - break; - } - return ret; + return __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off); } -strong_alias(__dl_iterate_phdr, dl_iterate_phdr) -#endif -#endif diff --git a/ldso/ldso/dl-hash.c b/ldso/ldso/dl-hash.c index 2a393353b..740626e27 100644 --- a/ldso/ldso/dl-hash.c +++ b/ldso/ldso/dl-hash.c @@ -32,14 +32,6 @@ /* Various symbol table handling functions, including symbol lookup */ - -/* - * This is the start of the linked list that describes all of the files present - * in the system with pointers to all of the symbol, string, and hash tables, - * as well as all of the other good stuff in the binary. - */ -struct elf_resolve *_dl_loaded_modules = NULL; - /* * This is the list of modules that are loaded when the image is first * started. As we add more via dlopen, they get added into other @@ -103,17 +95,18 @@ struct elf_resolve *_dl_add_elf_hash_table(const char *libname, struct elf_resolve *tpnt; int i; - if (!_dl_loaded_modules) { - tpnt = _dl_loaded_modules = (struct elf_resolve *) _dl_malloc(sizeof(struct elf_resolve)); - _dl_memset(tpnt, 0, sizeof(struct elf_resolve)); - } else { - tpnt = _dl_loaded_modules; - while (tpnt->next) - tpnt = tpnt->next; - tpnt->next = (struct elf_resolve *) _dl_malloc(sizeof(struct elf_resolve)); - _dl_memset(tpnt->next, 0, sizeof(struct elf_resolve)); - tpnt->next->prev = tpnt; - tpnt = tpnt->next; + tpnt = _dl_malloc(sizeof(struct elf_resolve)); + _dl_memset(tpnt, 0, sizeof(struct elf_resolve)); + + if (!_dl_loaded_modules) + _dl_loaded_modules = tpnt; + else { + struct elf_resolve *t = _dl_loaded_modules; + while (t->next) + t = t->next; + t->next = tpnt; + t->next->prev = t; + tpnt = t->next; } tpnt->next = NULL; @@ -122,6 +115,15 @@ struct elf_resolve *_dl_add_elf_hash_table(const char *libname, tpnt->dynamic_addr = (ElfW(Dyn) *)dynamic_addr; tpnt->libtype = loaded_file; +#ifdef __DSBT__ + if (dynamic_info[DT_DSBT_BASE_IDX] != 0) + tpnt->dsbt_table = (void *)dynamic_info[DT_DSBT_BASE_IDX]; + if (dynamic_info[DT_DSBT_SIZE_IDX] != 0) + tpnt->dsbt_size = dynamic_info[DT_DSBT_SIZE_IDX]; + if (dynamic_info[DT_DSBT_INDEX_IDX] != 0) + tpnt->dsbt_index = dynamic_info[DT_DSBT_INDEX_IDX]; +#endif /* __DSBT__ */ + #ifdef __LDSO_GNU_HASH_SUPPORT__ if (dynamic_info[DT_GNU_HASH_IDX] != 0) { Elf32_Word *hash32 = (Elf_Symndx*)dynamic_info[DT_GNU_HASH_IDX]; @@ -153,7 +155,6 @@ struct elf_resolve *_dl_add_elf_hash_table(const char *libname, tpnt->chains = hash_addr; } tpnt->loadaddr = loadaddr; - tpnt->mapaddr = DL_RELOC_ADDR(loadaddr, 0); for (i = 0; i < DYNAMIC_SIZE; i++) tpnt->dynamic_info[i] = dynamic_info[i]; return tpnt; @@ -164,6 +165,22 @@ struct elf_resolve *_dl_add_elf_hash_table(const char *libname, static __attribute_noinline__ const ElfW(Sym) * check_match (const ElfW(Sym) *sym, char *strtab, const char* undef_name, int type_class) { + +#if defined(USE_TLS) && USE_TLS + if ((sym->st_value == 0 && 
(ELF_ST_TYPE(sym->st_info) != STT_TLS)) + || (type_class & (sym->st_shndx == SHN_UNDEF))) + /* No value or undefined symbol itself */ + return NULL; + + if (ELF_ST_TYPE(sym->st_info) > STT_FUNC + && ELF_ST_TYPE(sym->st_info) != STT_COMMON + && ELF_ST_TYPE(sym->st_info) != STT_TLS) + /* Ignore all but STT_NOTYPE, STT_OBJECT, STT_FUNC and STT_COMMON + * entries (and STT_TLS if TLS is supported) since these + * are no code/data definitions. + */ + return NULL; +#else if (type_class & (sym->st_shndx == SHN_UNDEF)) /* undefined symbol itself */ return NULL; @@ -179,7 +196,11 @@ check_match (const ElfW(Sym) *sym, char *strtab, const char* undef_name, int typ * code/data definitions */ return NULL; - +#endif +#ifdef ARCH_SKIP_RELOC + if (ARCH_SKIP_RELOC(type_class, sym)) + return NULL; +#endif if (_dl_strcmp(strtab + sym->st_name, undef_name) != 0) return NULL; @@ -259,109 +280,116 @@ _dl_lookup_sysv_hash(struct elf_resolve *tpnt, ElfW(Sym) *symtab, unsigned long * This function resolves externals, and this is either called when we process * relocations or when we call an entry in the PLT table for the first time. */ -char *_dl_lookup_hash(const char *name, struct dyn_elf *rpnt, - struct elf_resolve *mytpnt, int type_class -#ifdef __FDPIC__ - , struct elf_resolve **tpntp -#endif - ) +char *_dl_find_hash(const char *name, struct r_scope_elem *scope, struct elf_resolve *mytpnt, + int type_class, struct symbol_ref *sym_ref) { struct elf_resolve *tpnt = NULL; ElfW(Sym) *symtab; + int i = 0; unsigned long elf_hash_number = 0xffffffff; const ElfW(Sym) *sym = NULL; - const ElfW(Sym) *weak_sym = 0; - struct elf_resolve *weak_tpnt = 0; + char *weak_result = NULL; + struct r_scope_elem *loop_scope; #ifdef __LDSO_GNU_HASH_SUPPORT__ unsigned long gnu_hash_number = _dl_gnu_hash((const unsigned char *)name); #endif - for (; rpnt; rpnt = rpnt->next) { - tpnt = rpnt->dyn; - - if (!(tpnt->rtld_flags & RTLD_GLOBAL) && mytpnt) { - if (mytpnt == tpnt) - ; - else { - struct init_fini_list *tmp; - - for (tmp = mytpnt->rtld_local; tmp; tmp = tmp->next) { - if (tmp->tpnt == tpnt) - break; + if ((sym_ref) && (sym_ref->sym) && (ELF32_ST_VISIBILITY(sym_ref->sym->st_other) == STV_PROTECTED)) { + sym = sym_ref->sym; + if (mytpnt) + tpnt = mytpnt; + } else + for (loop_scope = scope; loop_scope && !sym; loop_scope = loop_scope->next) { + for (i = 0; i < loop_scope->r_nlist; i++) { + tpnt = loop_scope->r_list[i]; + + if (!(tpnt->rtld_flags & RTLD_GLOBAL) && mytpnt) { + if (mytpnt == tpnt) + ; + else { + struct init_fini_list *tmp; + + for (tmp = mytpnt->rtld_local; tmp; tmp = tmp->next) { + if (tmp->tpnt == tpnt) + break; + } + if (!tmp) + continue; } - if (!tmp) - continue; } - } - /* Don't search the executable when resolving a copy reloc. */ - if ((type_class & ELF_RTYPE_CLASS_COPY) && tpnt->libtype == elf_executable) - continue; + /* Don't search the executable when resolving a copy reloc. */ + if ((type_class & ELF_RTYPE_CLASS_COPY) && tpnt->libtype == elf_executable) + continue; - /* If the hash table is empty there is nothing to do here. */ - if (tpnt->nbucket == 0) - continue; + /* If the hash table is empty there is nothing to do here. 
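The lookup loop above prefers the GNU hash table when the object has one and falls back to the classic SysV table otherwise, so both hash functions stay relevant. For reference, the sketch below restates the two well-known published algorithms behind _dl_elf_hash() and _dl_gnu_hash(); it is a standalone illustration, not code taken from this patch, and the function names are made up.

#include <stdint.h>

/* Classic SysV ELF hash, as specified in the System V ABI. */
static unsigned long sysv_elf_hash(const unsigned char *name)
{
	unsigned long h = 0, g;

	while (*name) {
		h = (h << 4) + *name++;
		g = h & 0xf0000000;
		if (g)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

/* GNU hash: Bernstein's djb2 over the symbol name, masked to 32 bits. */
static uint_fast32_t gnu_symbol_hash(const char *s)
{
	uint_fast32_t h = 5381;

	for (unsigned char c = *s; c != '\0'; c = *++s)
		h = h * 33 + c;
	return h & 0xffffffff;
}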
*/ + if (tpnt->nbucket == 0) + continue; - symtab = (ElfW(Sym) *) (intptr_t) (tpnt->dynamic_info[DT_SYMTAB]); + symtab = (ElfW(Sym) *) (intptr_t) (tpnt->dynamic_info[DT_SYMTAB]); #ifdef __LDSO_GNU_HASH_SUPPORT__ - /* Prefer GNU hash style, if any */ - if (tpnt->l_gnu_bitmask) { - sym = _dl_lookup_gnu_hash(tpnt, symtab, gnu_hash_number, name, type_class); - if (sym != NULL) - /* If sym has been found, do not search further */ - break; - } else { + /* Prefer GNU hash style, if any */ + if (tpnt->l_gnu_bitmask) { + sym = _dl_lookup_gnu_hash(tpnt, symtab, gnu_hash_number, name, type_class); + if (sym != NULL) + /* If sym has been found, do not search further */ + break; + } else { #endif - /* Use the old SysV-style hash table */ + /* Use the old SysV-style hash table */ - /* Calculate the old sysv hash number only once */ - if (elf_hash_number == 0xffffffff) - elf_hash_number = _dl_elf_hash((const unsigned char *)name); + /* Calculate the old sysv hash number only once */ + if (elf_hash_number == 0xffffffff) + elf_hash_number = _dl_elf_hash((const unsigned char *)name); - sym = _dl_lookup_sysv_hash(tpnt, symtab, elf_hash_number, name, type_class); - if (sym != NULL) - break; + sym = _dl_lookup_sysv_hash(tpnt, symtab, elf_hash_number, name, type_class); + if (sym != NULL) + /* If sym has been found, do not search further */ + break; #ifdef __LDSO_GNU_HASH_SUPPORT__ - } + } #endif - } /* end of for (; rpnt; rpnt = rpnt->next) { */ + } /* End of inner for */ + } if (sym) { + if (sym_ref) { + sym_ref->sym = sym; + sym_ref->tpnt = tpnt; + } /* At this point we have found the requested symbol, do binding */ +#if defined(USE_TLS) && USE_TLS + if (ELF_ST_TYPE(sym->st_info) == STT_TLS) { + _dl_assert(sym_ref != NULL); + return (char *)sym->st_value; + } +#endif + switch (ELF_ST_BIND(sym->st_info)) { case STB_WEAK: #if 0 -/* Perhaps we should support old style weak symbol handling - * per what glibc does when you export LD_DYNAMIC_WEAK */ - if (!weak_sym) { - weak_tpnt = tpnt; - weak_sym = sym; - } + /* Perhaps we should support old style weak symbol handling + * per what glibc does when you export LD_DYNAMIC_WEAK */ + if (!weak_result) + weak_result = (char *)DL_FIND_HASH_VALUE(tpnt, type_class, sym); break; #endif case STB_GLOBAL: -#ifdef __FDPIC__ - if (tpntp) - *tpntp = tpnt; +#if defined(__FRV_FDPIC__) || defined(__BFIN_FDPIC__) + if (sym_ref) + sym_ref->tpnt = tpnt; #endif - return (char *) DL_FIND_HASH_VALUE (tpnt, type_class, sym); + return (char *)DL_FIND_HASH_VALUE(tpnt, type_class, sym); default: /* Local symbols not handled here */ break; } } - if (weak_sym) { -#ifdef __FDPIC__ - if (tpntp) - *tpntp = weak_tpnt; -#endif - return (char *) DL_FIND_HASH_VALUE (weak_tpnt, type_class, weak_sym); - } -#ifdef __FDPIC__ - if (tpntp) - *tpntp = NULL; +#if defined(__FRV_FDPIC__) || defined(__BFIN_FDPIC__) + if (sym_ref) + sym_ref->tpnt = tpnt; #endif - return NULL; + return weak_result; } diff --git a/ldso/ldso/dl-startup.c b/ldso/ldso/dl-startup.c index 42fb44e9c..18a39ce2c 100644 --- a/ldso/ldso/dl-startup.c +++ b/ldso/ldso/dl-startup.c @@ -32,8 +32,8 @@ /* * The main trick with this program is that initially, we ourselves are not - * dynamicly linked. This means that we cannot access any global variables or - * call any functions. No globals initially, since the Global Offset Table + * dynamically linked. This means that we cannot access any global variables + * or call any functions. 
No globals initially, since the Global Offset Table * (GOT) is initialized by the linker assuming a virtual address of 0, and no * function calls initially since the Procedure Linkage Table (PLT) is not yet * initialized. @@ -55,12 +55,12 @@ * * Fortunately, the linker itself leaves a few clues lying around, and when the * kernel starts the image, there are a few further clues. First of all, there - * is Auxiliary Vector Table information sitting on which is provided to us by - * the kernel, and which includes information about the load address that the - * program interpreter was loaded at, the number of sections, the address the - * application was loaded at and so forth. Here this information is stored in - * the array auxvt. For details see linux/fs/binfmt_elf.c where it calls - * NEW_AUX_ENT() a bunch of time.... + * is Auxiliary Vector Table information sitting on the stack which is provided + * to us by the kernel, and which includes information about the address + * that the program interpreter was loaded at, the number of sections, the + * address the application was loaded at, and so forth. Here this information + * is stored in the array auxvt. For details see linux/fs/binfmt_elf.c where + * it calls NEW_AUX_ENT() a bunch of times.... * * Next, we need to find the GOT. On most arches there is a register pointing * to the GOT, but just in case (and for new ports) I've added some (slow) C @@ -94,8 +94,13 @@ /* Pull in all the arch specific stuff */ #include "dl-startup.h" +#ifdef __LDSO_PRELINK_SUPPORT__ +/* This is defined by the linker script. */ +extern ElfW(Addr) _begin[] attribute_hidden; +#endif + /* Static declarations */ -int (*_dl_elf_main) (int, char **, char **); +static int (*_dl_elf_main) (int, char **, char **); static void* __rtld_stack_end; /* Points to argc on stack, e.g *((long *)__rtld_stackend) == argc */ strong_alias(__rtld_stack_end, __libc_stack_end) /* Exported version of __rtld_stack_end */ @@ -103,8 +108,7 @@ strong_alias(__rtld_stack_end, __libc_stack_end) /* Exported version of __rtld_s /* When we enter this piece of code, the program stack looks like this: argc argument counter (integer) argv[0] program name (pointer) - argv[1...N] program args (pointers) - argv[argc-1] end of args (integer) + argv[1..argc-1] program args (pointers) NULL env[0...N] environment variables (pointers) NULL @@ -122,8 +126,9 @@ DL_START(unsigned long args) struct elf_resolve *tpnt = &tpnt_tmp; ElfW(auxv_t) auxvt[AT_EGID + 1]; ElfW(Dyn) *dpnt; + uint32_t *p32; - /* WARNING! -- we cannot make _any_ funtion calls until we have + /* WARNING! -- we cannot make _any_ function calls until we have * taken care of fixing up our own relocations. Making static * inline calls is ok, but _no_ function calls. Not yet * anyways. */ @@ -131,12 +136,12 @@ DL_START(unsigned long args) /* First obtain the information on the stack that tells us more about what binary is loaded, where it is loaded, etc, etc */ GET_ARGV(aux_dat, args); - argc = *(aux_dat - 1); + argc = aux_dat[-1]; argv = (char **) aux_dat; aux_dat += argc; /* Skip over the argv pointers */ aux_dat++; /* Skip over NULL at end of argv */ envp = (char **) aux_dat; -#ifndef NO_EARLY_SEND_STDERR +#if !defined(NO_EARLY_SEND_STDERR) SEND_EARLY_STDERR_DEBUG("argc="); SEND_NUMBER_STDERR_DEBUG(argc, 0); SEND_EARLY_STDERR_DEBUG(" argv="); @@ -164,11 +169,26 @@ DL_START(unsigned long args) aux_dat += 2; } - /* locate the ELF header. We need this done as soon as possible - * (esp since SEND_STDERR() needs this on some platforms... 
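A little further down, the ELF header validation is tightened: instead of comparing the four magic bytes of e_ident one by one, the patch loads them as a single 32-bit word and compares against ELFMAG_U32. A host-side sketch of the same trick follows; the constant shown assumes a little-endian target (which is why the real constant is derived per architecture), and the function name is purely illustrative.

#include <elf.h>
#include <stdint.h>
#include <string.h>

/* Return nonzero if the buffer starts with the ELF magic "\x7fELF",
 * checked as one 32-bit load rather than four byte compares. */
static int looks_like_elf(const unsigned char e_ident[EI_NIDENT])
{
	uint32_t magic;

	memcpy(&magic, e_ident, sizeof(magic));	/* memcpy avoids alignment traps */
	return magic == 0x464c457fUL;		/* "\x7f" 'E' 'L' 'F' read little-endian */
}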
*/ + /* + * Locate the dynamic linker ELF header. We need this done as soon as + * possible (esp since SEND_STDERR() needs this on some platforms... + */ + +#ifdef __LDSO_PRELINK_SUPPORT__ + /* + * The `_begin' symbol created by the linker script points to ld.so ELF + * We use it if the kernel is not passing a valid address through the auxvt. + */ + + if (!auxvt[AT_BASE].a_un.a_val) + auxvt[AT_BASE].a_un.a_val = (ElfW(Addr)) &_begin; + /* Note: if the dynamic linker itself is prelinked, the load_addr is 0 */ + DL_INIT_LOADADDR_BOOT(load_addr, elf_machine_load_address()); +#else if (!auxvt[AT_BASE].a_un.a_val) auxvt[AT_BASE].a_un.a_val = elf_machine_load_address(); DL_INIT_LOADADDR_BOOT(load_addr, auxvt[AT_BASE].a_un.a_val); +#endif header = (ElfW(Ehdr) *) auxvt[AT_BASE].a_un.a_val; /* Check the ELF header to make sure everything looks ok. */ @@ -177,16 +197,14 @@ DL_START(unsigned long args) /* Do not use an inline _dl_strncmp here or some arches * will blow chunks, i.e. those that need to relocate all * string constants... */ - || header->e_ident[EI_MAG0] != ELFMAG0 - || header->e_ident[EI_MAG1] != ELFMAG1 - || header->e_ident[EI_MAG2] != ELFMAG2 - || header->e_ident[EI_MAG3] != ELFMAG3) - { + || *(p32 = (uint32_t*)&header->e_ident) != ELFMAG_U32 + ) { SEND_EARLY_STDERR("Invalid ELF header\n"); _dl_exit(0); } SEND_EARLY_STDERR_DEBUG("ELF header="); - SEND_ADDRESS_STDERR_DEBUG(DL_LOADADDR_BASE(load_addr), 1); + SEND_ADDRESS_STDERR_DEBUG( + DL_LOADADDR_BASE(DL_GET_RUN_ADDR(load_addr, header)), 1); /* Locate the global offset table. Since this code must be PIC * we can take advantage of the magic offset register, if we @@ -195,7 +213,7 @@ DL_START(unsigned long args) DL_BOOT_COMPUTE_GOT(got); /* Now, finally, fix up the location of the dynamic stuff */ - DL_BOOT_COMPUTE_DYN (dpnt, got, load_addr); + DL_BOOT_COMPUTE_DYN(dpnt, got, (DL_LOADADDR_TYPE)header); SEND_EARLY_STDERR_DEBUG("First Dynamic section entry="); SEND_ADDRESS_STDERR_DEBUG(dpnt, 1); @@ -212,37 +230,42 @@ DL_START(unsigned long args) _dl_parse_dynamic_info(dpnt, tpnt->dynamic_info, NULL, load_addr); #endif + /* + * BIG ASSUMPTION: We assume that the dynamic loader does not + * have any TLS data itself. If this ever occurs + * more work than what is done below for the + * loader will have to happen. + */ +#if defined(USE_TLS) && USE_TLS + /* This was done by _dl_memset above. */ + /* tpnt->l_tls_modid = 0; */ +# if NO_TLS_OFFSET != 0 + tpnt->l_tls_offset = NO_TLS_OFFSET; +# endif +#endif + SEND_EARLY_STDERR_DEBUG("Done scanning DYNAMIC section\n"); #if defined(PERFORM_BOOTSTRAP_GOT) - SEND_EARLY_STDERR_DEBUG("About to do specific GOT bootstrap\n"); /* some arches (like MIPS) we have to tweak the GOT before relocations */ PERFORM_BOOTSTRAP_GOT(tpnt); - #endif -#if !defined(PERFORM_BOOTSTRAP_GOT) || defined(__avr32__) +#if !defined(PERFORM_BOOTSTRAP_GOT) || defined(__avr32__) || defined(__mips__) /* OK, now do the relocations. We do not do a lazy binding here, so that once we are done, we have considerably more flexibility. 
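The first pass of the bootstrap loop handles the R_*_RELATIVE entries in bulk via elf_machine_relative() (and skips that pass entirely when a prelinked object already sits at its linked address). The sketch below shows what such a bulk pass amounts to on a REL-style target; the types come from <link.h>, the function name is illustrative, and on RELA targets the stored addend would be used instead of the value already at r_offset.

#include <link.h>
#include <stdint.h>

/* Apply COUNT consecutive R_*_RELATIVE relocations: each entry only needs
 * the load bias added to the word stored at its r_offset. */
static void apply_relative_relocs(uintptr_t load_bias, ElfW(Rel) *rel,
				  unsigned long count)
{
	while (count--) {
		uintptr_t *where = (uintptr_t *)(load_bias + rel->r_offset);
		*where += load_bias;
		rel++;
	}
}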
*/ SEND_EARLY_STDERR_DEBUG("About to do library loader relocations\n"); { - int goof, indx; -#ifdef ELF_MACHINE_PLTREL_OVERLAP + int indx; +#if defined(ELF_MACHINE_PLTREL_OVERLAP) # define INDX_MAX 1 #else # define INDX_MAX 2 #endif - goof = 0; for (indx = 0; indx < INDX_MAX; indx++) { - unsigned int i; - unsigned long *reloc_addr; - unsigned long symbol_addr; - int symtab_index; - ElfW(Sym) *sym; - ELF_RELOC *rpnt; unsigned long rel_addr, rel_size; ElfW(Word) relative_count = tpnt->dynamic_info[DT_RELCONT_IDX]; @@ -254,51 +277,65 @@ DL_START(unsigned long args) if (!rel_addr) continue; - /* Now parse the relocation information */ - /* Since ldso is linked with -Bsymbolic, all relocs will be RELATIVE(for those archs that have - RELATIVE relocs) which means that the for(..) loop below has nothing to do and can be deleted. - Possibly one should add a HAVE_RELATIVE_RELOCS directive and #ifdef away some code. */ if (!indx && relative_count) { rel_size -= relative_count * sizeof(ELF_RELOC); - elf_machine_relative(load_addr, rel_addr, relative_count); +#ifdef __LDSO_PRELINK_SUPPORT__ + if (load_addr || !tpnt->dynamic_info[DT_GNU_PRELINKED_IDX]) +#endif + elf_machine_relative(load_addr, rel_addr, relative_count); rel_addr += relative_count * sizeof(ELF_RELOC); } - rpnt = (ELF_RELOC *) rel_addr; - for (i = 0; i < rel_size; i += sizeof(ELF_RELOC), rpnt++) { - reloc_addr = (unsigned long *) DL_RELOC_ADDR(load_addr, (unsigned long)rpnt->r_offset); - symtab_index = ELF_R_SYM(rpnt->r_info); - symbol_addr = 0; - sym = NULL; - if (symtab_index) { - char *strtab; - ElfW(Sym) *symtab; - - symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; - strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; - sym = &symtab[symtab_index]; - symbol_addr = (unsigned long) DL_RELOC_ADDR(load_addr, sym->st_value); - -#ifndef EARLY_STDERR_SPECIAL - SEND_STDERR_DEBUG("relocating symbol: "); - SEND_STDERR_DEBUG(strtab + sym->st_name); - SEND_STDERR_DEBUG("\n"); + /* + * Since ldso is linked with -Bsymbolic, all relocs should be RELATIVE. All archs + * that need bootstrap relocations need to define ARCH_NEEDS_BOOTSTRAP_RELOCS. + */ +#ifdef ARCH_NEEDS_BOOTSTRAP_RELOCS + { + ELF_RELOC *rpnt; + unsigned int i; + ElfW(Sym) *sym; + unsigned long symbol_addr; + int symtab_index; + unsigned long *reloc_addr; + + /* Now parse the relocation information */ + rpnt = (ELF_RELOC *) rel_addr; + for (i = 0; i < rel_size; i += sizeof(ELF_RELOC), rpnt++) { + reloc_addr = (unsigned long *) DL_RELOC_ADDR(load_addr, (unsigned long)rpnt->r_offset); + symtab_index = ELF_R_SYM(rpnt->r_info); + symbol_addr = 0; + sym = NULL; + if (symtab_index) { + char *strtab; + ElfW(Sym) *symtab; + + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; + sym = &symtab[symtab_index]; + symbol_addr = (unsigned long) DL_RELOC_ADDR(load_addr, sym->st_value); +#if !defined(EARLY_STDERR_SPECIAL) + SEND_STDERR_DEBUG("relocating symbol: "); + SEND_STDERR_DEBUG(strtab + sym->st_name); + SEND_STDERR_DEBUG("\n"); #endif - } else { - SEND_STDERR_DEBUG("relocating unknown symbol\n"); + } else { + SEND_STDERR_DEBUG("relocating unknown symbol\n"); + } + /* Use this machine-specific macro to perform the actual relocation. */ + PERFORM_BOOTSTRAP_RELOC(rpnt, reloc_addr, symbol_addr, load_addr, sym); } - /* Use this machine-specific macro to perform the actual relocation. 
*/ - PERFORM_BOOTSTRAP_RELOC(rpnt, reloc_addr, symbol_addr, load_addr, sym); } - } - - if (goof) { - _dl_exit(14); +#else /* ARCH_NEEDS_BOOTSTRAP_RELOCS */ + if (rel_size) { + SEND_EARLY_STDERR("Cannot continue, found non relative relocs during the bootstrap.\n"); + _dl_exit(14); + } +#endif } } #endif - /* Wahoo!!! */ SEND_STDERR_DEBUG("Done relocating ldso; we can now use globals and make function calls!\n"); /* Now we have done the mandatory linking of some things. We are now @@ -308,16 +345,15 @@ DL_START(unsigned long args) __rtld_stack_end = (void *)(argv - 1); - _dl_get_ready_to_run(tpnt, load_addr, auxvt, envp, argv - DL_GET_READY_TO_RUN_EXTRA_ARGS); - + _dl_elf_main = (int (*)(int, char **, char **)) + _dl_get_ready_to_run(tpnt, load_addr, auxvt, envp, argv + DL_GET_READY_TO_RUN_EXTRA_ARGS); /* Transfer control to the application. */ SEND_STDERR_DEBUG("transfering control to application @ "); - _dl_elf_main = (int (*)(int, char **, char **)) auxvt[AT_ENTRY].a_un.a_val; SEND_ADDRESS_STDERR_DEBUG(_dl_elf_main, 1); -#ifndef START +#if !defined(START) return _dl_elf_main; #else START(); diff --git a/ldso/ldso/dl-symbols.c b/ldso/ldso/dl-symbols.c new file mode 100644 index 000000000..e5c00211a --- /dev/null +++ b/ldso/ldso/dl-symbols.c @@ -0,0 +1,21 @@ +/* + * This contains all symbols shared between + * dynamic linker ld.so and into static libc + * + * Copyright (c) 2008 STMicroelectronics Ltd + * Author: Carmelo Amoroso <carmelo.amoroso@st.com> + * + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + * + */ + +/* + * This is the start of the linked list that describes all of the files present + * in the system with pointers to all of the symbol, string, and hash tables, + * as well as all of the other good stuff in the binary. + */ +#include <ldso.h> + +struct elf_resolve *_dl_loaded_modules = NULL; + diff --git a/ldso/ldso/dl-tls.c b/ldso/ldso/dl-tls.c new file mode 100644 index 000000000..ced20fa2b --- /dev/null +++ b/ldso/ldso/dl-tls.c @@ -0,0 +1,1056 @@ +/* vi: set sw=4 ts=4: */ +/* + * Thread-local storage handling in the ELF dynamic linker. + * + * Copyright (C) 2005 by Steven J. Hill <sjhill@realitydiluted.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. The name of the above contributors may not be + * used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include <tls.h> +#include <dl-tls.h> +#include <ldsodefs.h> + +void *(*_dl_calloc_function) (size_t __nmemb, size_t __size) = NULL; +void *(*_dl_realloc_function) (void *__ptr, size_t __size) = NULL; +void *(*_dl_memalign_function) (size_t __boundary, size_t __size) = NULL; + +void (*_dl_free_function) (void *__ptr); +void *_dl_memalign (size_t __boundary, size_t __size); +struct link_map *_dl_update_slotinfo (unsigned long int req_modid); + +/* Round up N to the nearest multiple of P, where P is a power of 2 + --- without using libgcc division routines. */ +#define roundup_pow2(n, p) (((n) + (p) - 1) & ~((p) - 1)) + +void * +_dl_calloc (size_t __nmemb, size_t __size) +{ + void *result; + size_t size = (__size * __nmemb); + + if (_dl_calloc_function) + return (*_dl_calloc_function) (__nmemb, __size); + + if ((result = _dl_malloc(size)) != NULL) { + _dl_memset(result, 0, size); + } + + return result; +} + +void * +_dl_realloc (void * __ptr, size_t __size) +{ + if (_dl_realloc_function) + return (*_dl_realloc_function) (__ptr, __size); + + _dl_debug_early("NOT IMPLEMENTED PROPERLY!!!\n"); + return NULL; +} + +/* The __tls_get_addr function has two basic forms which differ in the + arguments. The IA-64 form takes two parameters, the module ID and + offset. The form used, among others, on IA-32 takes a reference to + a special structure which contain the same information. The second + form seems to be more often used (in the moment) so we default to + it. Users of the IA-64 form have to provide adequate definitions + of the following macros. */ +#ifndef GET_ADDR_ARGS +# define GET_ADDR_ARGS tls_index *ti +#endif +#ifndef GET_ADDR_MODULE +# define GET_ADDR_MODULE ti->ti_module +#endif +#ifndef GET_ADDR_OFFSET +# define GET_ADDR_OFFSET ti->ti_offset +#endif + +/* + * Amount of excess space to allocate in the static TLS area + * to allow dynamic loading of modules defining IE-model TLS data. + */ +#define TLS_STATIC_SURPLUS 64 + DL_NNS * 100 + +/* Value used for dtv entries for which the allocation is delayed. */ +#define TLS_DTV_UNALLOCATED ((void *) -1l) + +/* + * We are trying to perform a static TLS relocation in MAP, but it was + * dynamically loaded. This can only work if there is enough surplus in + * the static TLS area already allocated for each running thread. If this + * object's TLS segment is too big to fit, we fail. If it fits, + * we set MAP->l_tls_offset and return. + */ +int +internal_function +_dl_try_allocate_static_tls (struct link_map* map) +{ + /* If the alignment requirements are too high fail. */ + if (map->l_tls_align > _dl_tls_static_align) + { +fail: + return -1; + } + +# ifdef TLS_TCB_AT_TP + size_t freebytes; + size_t n; + size_t blsize; + + freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE; + + blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset; + if (freebytes < blsize) + goto fail; + + n = (freebytes - blsize) & ~(map->l_tls_align - 1); + + size_t offset = _dl_tls_static_used + (freebytes - n + - map->l_tls_firstbyte_offset); + + map->l_tls_offset = _dl_tls_static_used = offset; +# elif defined(TLS_DTV_AT_TP) + size_t used; + size_t check; + + size_t offset = roundup_pow2 (_dl_tls_static_used, map->l_tls_align); + used = offset + map->l_tls_blocksize; + check = used; + + /* dl_tls_static_used includes the TCB at the beginning. 
*/ + if (check > _dl_tls_static_size) + goto fail; + + map->l_tls_offset = offset; + _dl_tls_static_used = used; +# else +# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" +# endif + + /* + * If the object is not yet relocated we cannot initialize the + * static TLS region. Delay it. + */ + if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE) + { +#ifdef SHARED + /* + * Update the slot information data for at least the generation of + * the DSO we are allocating data for. + */ + if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0)) + (void) _dl_update_slotinfo (map->l_tls_modid); +#endif + _dl_init_static_tls (map); + } + else + map->l_need_tls_init = 1; + + return 0; +} + +/* + * This function intentionally does not return any value but signals error + * directly, as static TLS should be rare and code handling it should + * not be inlined as much as possible. + */ +void +internal_function __attribute_noinline__ +_dl_allocate_static_tls (struct link_map *map) +{ + if (_dl_try_allocate_static_tls (map)) { + _dl_dprintf(2, "cannot allocate memory in static TLS block"); + _dl_exit(30); + } +} + +#ifdef SHARED +/* Initialize static TLS area and DTV for current (only) thread. + libpthread implementations should provide their own hook + to handle all threads. */ +void +attribute_hidden __attribute_noinline__ +_dl_nothread_init_static_tls (struct link_map *map) +{ +# ifdef TLS_TCB_AT_TP + void *dest = (char *) THREAD_SELF - map->l_tls_offset; +# elif defined(TLS_DTV_AT_TP) + void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE; +# else +# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" +# endif + + /* Fill in the DTV slot so that a later LD/GD access will find it. */ + dtv_t *dtv = THREAD_DTV (); + if (!(map->l_tls_modid <= dtv[-1].counter)) { + _dl_dprintf(2, "map->l_tls_modid <= dtv[-1].counter FAILED!\n"); + _dl_exit(30); + } + dtv[map->l_tls_modid].pointer.val = dest; + dtv[map->l_tls_modid].pointer.is_static = true; + + /* Initialize the memory. */ + _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size); + _dl_memset((dest + map->l_tls_initimage_size), '\0', + map->l_tls_blocksize - map->l_tls_initimage_size); +} +#endif + +/* Taken from glibc/sysdeps/generic/dl-tls.c */ +static void +oom (void) +{ + _dl_debug_early("cannot allocate thread-local memory: ABORT\n"); + _dl_exit(30); +} + +size_t +internal_function +_dl_next_tls_modid (void) +{ + size_t result; + + if (__builtin_expect (_dl_tls_dtv_gaps, false)) + { + size_t disp = 0; + struct dtv_slotinfo_list *runp = _dl_tls_dtv_slotinfo_list; + + /* Note that this branch will never be executed during program + start since there are no gaps at that time. Therefore it + does not matter that the dl_tls_dtv_slotinfo is not allocated + yet when the function is called for the first times. + + NB: the offset +1 is due to the fact that DTV[0] is used + for something else. */ + result = _dl_tls_static_nelem + 1; + if (result <= _dl_tls_max_dtv_idx) + do + { + while (result - disp < runp->len) + { + if (runp->slotinfo[result - disp].map == NULL) + break; + + ++result; + _dl_assert (result <= _dl_tls_max_dtv_idx + 1); + } + + if (result - disp < runp->len) + break; + + disp += runp->len; + } + while ((runp = runp->next) != NULL); + + if (result > _dl_tls_max_dtv_idx) + { + /* The new index must indeed be exactly one higher than the + previous high. */ + _dl_assert (result == _dl_tls_max_dtv_idx + 1); + /* There is no gap anymore. 
*/ + _dl_tls_dtv_gaps = false; + + goto nogaps; + } + } + else + { + /* No gaps, allocate a new entry. */ + nogaps: + + result = ++_dl_tls_max_dtv_idx; + } + + return result; +} + +void +internal_function +_dl_determine_tlsoffset (void) +{ + size_t max_align = TLS_TCB_ALIGN; + size_t freetop = 0; + size_t freebottom = 0; + + /* The first element of the dtv slot info list is allocated. */ + _dl_assert (_dl_tls_dtv_slotinfo_list != NULL); + /* There is at this point only one element in the + dl_tls_dtv_slotinfo_list list. */ + _dl_assert (_dl_tls_dtv_slotinfo_list->next == NULL); + + struct dtv_slotinfo *slotinfo = _dl_tls_dtv_slotinfo_list->slotinfo; + + /* Determining the offset of the various parts of the static TLS + block has several dependencies. In addition we have to work + around bugs in some toolchains. + + Each TLS block from the objects available at link time has a size + and an alignment requirement. The GNU ld computes the alignment + requirements for the data at the positions *in the file*, though. + I.e, it is not simply possible to allocate a block with the size + of the TLS program header entry. The data is layed out assuming + that the first byte of the TLS block fulfills + + p_vaddr mod p_align == &TLS_BLOCK mod p_align + + This means we have to add artificial padding at the beginning of + the TLS block. These bytes are never used for the TLS data in + this module but the first byte allocated must be aligned + according to mod p_align == 0 so that the first byte of the TLS + block is aligned according to p_vaddr mod p_align. This is ugly + and the linker can help by computing the offsets in the TLS block + assuming the first byte of the TLS block is aligned according to + p_align. + + The extra space which might be allocated before the first byte of + the TLS block need not go unused. The code below tries to use + that memory for the next TLS block. This can work if the total + memory requirement for the next TLS block is smaller than the + gap. */ + +# ifdef TLS_TCB_AT_TP + /* We simply start with zero. */ + size_t cnt, offset = 0; + + for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt) + { + _dl_assert (cnt < _dl_tls_dtv_slotinfo_list->len); + + size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset + & (slotinfo[cnt].map->l_tls_align - 1)); + size_t off; + max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align); + + if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize) + { + off = roundup_pow2 (freetop + slotinfo[cnt].map->l_tls_blocksize + - firstbyte, slotinfo[cnt].map->l_tls_align) + + firstbyte; + if (off <= freebottom) + { + freetop = off; + + /* XXX For some architectures we perhaps should store the + negative offset. */ + slotinfo[cnt].map->l_tls_offset = off; + continue; + } + } + + off = roundup_pow2 (offset + slotinfo[cnt].map->l_tls_blocksize + - firstbyte, slotinfo[cnt].map->l_tls_align) + + firstbyte; + if (off > offset + slotinfo[cnt].map->l_tls_blocksize + + (freebottom - freetop)) + { + freetop = offset; + freebottom = off - slotinfo[cnt].map->l_tls_blocksize; + } + offset = off; + + /* XXX For some architectures we perhaps should store the + negative offset. */ + slotinfo[cnt].map->l_tls_offset = off; + } + + _dl_tls_static_used = offset; + _dl_tls_static_size = (roundup_pow2 (offset + TLS_STATIC_SURPLUS, max_align) + + TLS_TCB_SIZE); +# elif defined(TLS_DTV_AT_TP) + /* The TLS blocks start right after the TCB. 
*/ + size_t offset = TLS_TCB_SIZE; + size_t cnt; + + for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt) + { + _dl_assert (cnt < _dl_tls_dtv_slotinfo_list->len); + + size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset + & (slotinfo[cnt].map->l_tls_align - 1)); + size_t off; + max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align); + + if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom) + { + off = roundup_pow2 (freebottom, slotinfo[cnt].map->l_tls_align); + if (off - freebottom < firstbyte) + off += slotinfo[cnt].map->l_tls_align; + if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop) + { + slotinfo[cnt].map->l_tls_offset = off - firstbyte; + freebottom = (off + slotinfo[cnt].map->l_tls_blocksize + - firstbyte); + continue; + } + } + + off = roundup_pow2 (offset, slotinfo[cnt].map->l_tls_align); + if (off - offset < firstbyte) + off += slotinfo[cnt].map->l_tls_align; + + slotinfo[cnt].map->l_tls_offset = off - firstbyte; + if (off - firstbyte - offset > freetop - freebottom) + { + freebottom = offset; + freetop = off - firstbyte; + } + + offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte; + } + + _dl_tls_static_used = offset; + _dl_tls_static_size = roundup_pow2 (offset + TLS_STATIC_SURPLUS, + TLS_TCB_ALIGN); +# else +# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" +# endif + + /* The alignment requirement for the static TLS block. */ + _dl_tls_static_align = max_align; +} + +/* This is called only when the data structure setup was skipped at startup, + when there was no need for it then. Now we have dynamically loaded + something needing TLS, or libpthread needs it. */ +rtld_hidden_proto(_dl_tls_setup) +int +internal_function +_dl_tls_setup (void) +{ + _dl_assert (_dl_tls_dtv_slotinfo_list == NULL); + _dl_assert (_dl_tls_max_dtv_idx == 0); + + const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS; + + _dl_tls_dtv_slotinfo_list + = _dl_calloc (1, (sizeof (struct dtv_slotinfo_list) + + nelem * sizeof (struct dtv_slotinfo))); + if (_dl_tls_dtv_slotinfo_list == NULL) + return -1; + + _dl_tls_dtv_slotinfo_list->len = nelem; + + /* Number of elements in the static TLS block. It can't be zero + because of various assumptions. The one element is null. */ + _dl_tls_static_nelem = _dl_tls_max_dtv_idx = 1; + + /* This initializes more variables for us. */ + _dl_determine_tlsoffset (); + + return 0; +} +rtld_hidden_def (_dl_tls_setup) + +static void * +internal_function +allocate_dtv (void *result) +{ + dtv_t *dtv; + size_t dtv_length; + + /* We allocate a few more elements in the dtv than are needed for the + initial set of modules. This should avoid in most cases expansions + of the dtv. */ + dtv_length = _dl_tls_max_dtv_idx + DTV_SURPLUS; + dtv = _dl_calloc (dtv_length + 2, sizeof (dtv_t)); + if (dtv != NULL) + { + /* This is the initial length of the dtv. */ + dtv[0].counter = dtv_length; + + /* The rest of the dtv (including the generation counter) is + Initialize with zero to indicate nothing there. */ + + /* Add the dtv to the thread data structures. */ + INSTALL_DTV (result, dtv); + } + else + result = NULL; + + return result; +} + +/* Get size and alignment requirements of the static TLS block. 
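_dl_determine_tlsoffset() above leans entirely on the roundup_pow2() macro defined earlier in this file to place each module's TLS block at a suitably aligned offset while reusing the padding holes that alignment creates. A tiny standalone check of the rounding identity, with the macro restated so the snippet compiles on its own:

#include <assert.h>
#include <stddef.h>

/* Same trick as roundup_pow2() above: round N up to a multiple of the
 * power-of-two P without pulling in a division routine. */
#define ROUNDUP_POW2(n, p) (((n) + (p) - 1) & ~((size_t)(p) - 1))

int main(void)
{
	assert(ROUNDUP_POW2(13, 8) == 16);	/* 13-byte block aligned up to 8 */
	assert(ROUNDUP_POW2(16, 8) == 16);	/* already aligned: unchanged */
	assert(ROUNDUP_POW2(1, 64) == 64);
	return 0;
}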
*/ +void +internal_function +_dl_get_tls_static_info (size_t *sizep, size_t *alignp) +{ + *sizep = _dl_tls_static_size; + *alignp = _dl_tls_static_align; +} + +void * +internal_function +_dl_allocate_tls_storage (void) +{ + void *result; + size_t size = _dl_tls_static_size; + +# if defined(TLS_DTV_AT_TP) + /* Memory layout is: + [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ] + ^ This should be returned. */ + size += (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1) + & ~(_dl_tls_static_align - 1); +# endif + + /* Allocate a correctly aligned chunk of memory. */ + result = _dl_memalign (_dl_tls_static_align, size); + if (__builtin_expect (result != NULL, 1)) + { + /* Allocate the DTV. */ + void *allocated = result; + +# ifdef TLS_TCB_AT_TP + /* The TCB follows the TLS blocks. */ + result = (char *) result + size - TLS_TCB_SIZE; + + /* Clear the TCB data structure. We can't ask the caller (i.e. + libpthread) to do it, because we will initialize the DTV et al. */ + _dl_memset (result, '\0', TLS_TCB_SIZE); +# elif defined(TLS_DTV_AT_TP) + result = (char *) result + size - _dl_tls_static_size; + + /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it. + We can't ask the caller (i.e. libpthread) to do it, because we will + initialize the DTV et al. */ + _dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0', + TLS_PRE_TCB_SIZE + TLS_TCB_SIZE); +# endif + + result = allocate_dtv (result); + if (result == NULL) + _dl_free (allocated); + } + + return result; +} + +void * +internal_function +_dl_allocate_tls_init (void *result) +{ + if (result == NULL) + /* The memory allocation failed. */ + return NULL; + + dtv_t *dtv = GET_DTV (result); + struct dtv_slotinfo_list *listp; + size_t total = 0; + size_t maxgen = 0; + + /* We have to prepare the dtv for all currently loaded modules using + TLS. For those which are dynamically loaded we add the values + indicating deferred allocation. */ + listp = _dl_tls_dtv_slotinfo_list; + while (1) + { + size_t cnt; + + for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt) + { + struct link_map *map; + void *dest; + + /* Check for the total number of used slots. */ + if (total + cnt > _dl_tls_max_dtv_idx) + break; + + map = listp->slotinfo[cnt].map; + if (map == NULL) + /* Unused entry. */ + continue; + + /* Keep track of the maximum generation number. This might + not be the generation counter. */ + maxgen = MAX (maxgen, listp->slotinfo[cnt].gen); + + if (map->l_tls_offset == NO_TLS_OFFSET) + { + /* For dynamically loaded modules we simply store + the value indicating deferred allocation. */ + dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED; + dtv[map->l_tls_modid].pointer.is_static = false; + continue; + } + + _dl_assert (map->l_tls_modid == cnt); + _dl_assert (map->l_tls_blocksize >= map->l_tls_initimage_size); +# ifdef TLS_TCB_AT_TP + _dl_assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize); + dest = (char *) result - map->l_tls_offset; +# elif defined(TLS_DTV_AT_TP) + dest = (char *) result + map->l_tls_offset; +# else +# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" +# endif + + /* Copy the initialization image and clear the BSS part. 
*/ + dtv[map->l_tls_modid].pointer.val = dest; + dtv[map->l_tls_modid].pointer.is_static = true; + _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size); + _dl_memset((dest + map->l_tls_initimage_size), '\0', + map->l_tls_blocksize - map->l_tls_initimage_size); + + } + + total += cnt; + if (total >= _dl_tls_max_dtv_idx) + break; + + listp = listp->next; + _dl_assert (listp != NULL); + } + + /* The DTV version is up-to-date now. */ + dtv[0].counter = maxgen; + + return result; +} + +void * +internal_function +_dl_allocate_tls (void *mem) +{ + return _dl_allocate_tls_init (mem == NULL + ? _dl_allocate_tls_storage () + : allocate_dtv (mem)); +} + +void +internal_function +_dl_deallocate_tls (void *tcb, bool dealloc_tcb) +{ + dtv_t *dtv = GET_DTV (tcb); + size_t cnt; + + /* We need to free the memory allocated for non-static TLS. */ + for (cnt = 0; cnt < dtv[-1].counter; ++cnt) + if (! dtv[1 + cnt].pointer.is_static + && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED) + _dl_free (dtv[1 + cnt].pointer.val); + + /* The array starts with dtv[-1]. */ + if (dtv != _dl_initial_dtv) + _dl_free (dtv - 1); + + if (dealloc_tcb) + { +# ifdef TLS_TCB_AT_TP + /* The TCB follows the TLS blocks. Back up to free the whole block. */ + tcb -= _dl_tls_static_size - TLS_TCB_SIZE; +# elif defined(TLS_DTV_AT_TP) + /* Back up the TLS_PRE_TCB_SIZE bytes. */ + tcb -= (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1) + & ~(_dl_tls_static_align - 1); +# endif + _dl_free (tcb); + } +} + +static void * +allocate_and_init (struct link_map *map) +{ + void *newp; + + newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize); + if (newp == NULL) + { + _dl_dprintf(2, "%s:%d: Out of memory!!!\n", __func__, __LINE__); + _dl_exit(1); + } + + /* Initialize the memory. */ + _dl_memcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size); + _dl_memset ((newp + map->l_tls_initimage_size), '\0', + map->l_tls_blocksize - map->l_tls_initimage_size); + + return newp; +} + +struct link_map * +_dl_update_slotinfo (unsigned long int req_modid) +{ + struct link_map *the_map = NULL; + dtv_t *dtv = THREAD_DTV (); + + /* The global dl_tls_dtv_slotinfo array contains for each module + index the generation counter current when the entry was created. + This array never shrinks so that all module indices which were + valid at some time can be used to access it. Before the first + use of a new module index in this function the array was extended + appropriately. Access also does not have to be guarded against + modifications of the array. It is assumed that pointer-size + values can be read atomically even in SMP environments. It is + possible that other threads at the same time dynamically load + code and therefore add to the slotinfo list. This is a problem + since we must not pick up any information about incomplete work. + The solution to this is to ignore all dtv slots which were + created after the one we are currently interested. We know that + dynamic loading for this module is completed and this is the last + load operation we know finished. */ + unsigned long int idx = req_modid; + struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list; + + _dl_debug_early ("Updating slotinfo for module %d\n", req_modid); + + while (idx >= listp->len) + { + idx -= listp->len; + listp = listp->next; + } + + if (dtv[0].counter < listp->slotinfo[idx].gen) + { + /* The generation counter for the slot is higher than what the + current dtv implements. 
We have to update the whole dtv but + only those entries with a generation counter <= the one for + the entry we need. */ + size_t new_gen = listp->slotinfo[idx].gen; + size_t total = 0; + + /* We have to look through the entire dtv slotinfo list. */ + listp = _dl_tls_dtv_slotinfo_list; + do + { + size_t cnt; + + for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt) + { + size_t gen = listp->slotinfo[cnt].gen; + + if (gen > new_gen) + /* This is a slot for a generation younger than the + one we are handling now. It might be incompletely + set up so ignore it. */ + continue; + + /* If the entry is older than the current dtv layout we + know we don't have to handle it. */ + if (gen <= dtv[0].counter) + continue; + + /* If there is no map this means the entry is empty. */ + struct link_map *map = listp->slotinfo[cnt].map; + if (map == NULL) + { + /* If this modid was used at some point the memory + might still be allocated. */ + if (! dtv[total + cnt].pointer.is_static + && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED) + { + _dl_free (dtv[total + cnt].pointer.val); + dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED; + } + + continue; + } + + /* Check whether the current dtv array is large enough. */ + size_t modid = map->l_tls_modid; + _dl_assert (total + cnt == modid); + if (dtv[-1].counter < modid) + { + /* Reallocate the dtv. */ + dtv_t *newp; + size_t newsize = _dl_tls_max_dtv_idx + DTV_SURPLUS; + size_t oldsize = dtv[-1].counter; + + _dl_assert (map->l_tls_modid <= newsize); + + if (dtv == _dl_initial_dtv) + { + /* This is the initial dtv that was allocated + during rtld startup using the dl-minimal.c + malloc instead of the real malloc. We can't + free it, we have to abandon the old storage. */ + + newp = _dl_malloc ((2 + newsize) * sizeof (dtv_t)); + if (newp == NULL) + oom (); + _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t)); + } + else + { + newp = _dl_realloc (&dtv[-1], + (2 + newsize) * sizeof (dtv_t)); + if (newp == NULL) + oom (); + } + + newp[0].counter = newsize; + + /* Clear the newly allocated part. */ + _dl_memset (newp + 2 + oldsize, '\0', + (newsize - oldsize) * sizeof (dtv_t)); + + /* Point dtv to the generation counter. */ + dtv = &newp[1]; + + /* Install this new dtv in the thread data + structures. */ + INSTALL_NEW_DTV (dtv); + } + + /* If there is currently memory allocate for this + dtv entry free it. */ + /* XXX Ideally we will at some point create a memory + pool. */ + if (! dtv[modid].pointer.is_static + && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED) + /* Note that free is called for NULL is well. We + deallocate even if it is this dtv entry we are + supposed to load. The reason is that we call + memalign and not malloc. */ + _dl_free (dtv[modid].pointer.val); + + /* This module is loaded dynamically- We defer memory + allocation. */ + dtv[modid].pointer.is_static = false; + dtv[modid].pointer.val = TLS_DTV_UNALLOCATED; + + if (modid == req_modid) + the_map = map; + } + + total += listp->len; + } + while ((listp = listp->next) != NULL); + + /* This will be the new maximum generation counter. */ + dtv[0].counter = new_gen; + } + + return the_map; +} + + +/* The generic dynamic and local dynamic model cannot be used in + statically linked applications. 
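__tls_get_addr(), defined just below, is the entry point the compiler calls for general-dynamic and local-dynamic TLS accesses. For context, a shared-library source file like the hypothetical one below is what generates such calls; the file and symbol names are invented for illustration.

/* tls_demo.c -- built as part of a shared object, e.g.
 *   gcc -fPIC -shared tls_demo.c -o libtls_demo.so
 * With the default (general-dynamic) TLS model the access below is
 * lowered to a call into __tls_get_addr() carrying this module's ID
 * and the variable's offset within its TLS block. */
__thread int counter;		/* one private instance per thread */

int bump(void)
{
	return ++counter;
}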
*/ +void * +__tls_get_addr (GET_ADDR_ARGS) +{ + dtv_t *dtv = THREAD_DTV (); + struct link_map *the_map = NULL; + void *p; + + if (__builtin_expect (dtv[0].counter != _dl_tls_generation, 0)) + { + the_map = _dl_update_slotinfo (GET_ADDR_MODULE); + dtv = THREAD_DTV (); + } + + p = dtv[GET_ADDR_MODULE].pointer.val; + + if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0)) + { + /* The allocation was deferred. Do it now. */ + if (the_map == NULL) + { + /* Find the link map for this module. */ + size_t idx = GET_ADDR_MODULE; + struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list; + + while (idx >= listp->len) + { + idx -= listp->len; + listp = listp->next; + } + + the_map = listp->slotinfo[idx].map; + } + + p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map); + dtv[GET_ADDR_MODULE].pointer.is_static = false; + } + + return (char *) p + GET_ADDR_OFFSET; +} + +void +_dl_add_to_slotinfo (struct link_map *l) +{ + /* Now that we know the object is loaded successfully add + modules containing TLS data to the dtv info table. We + might have to increase its size. */ + struct dtv_slotinfo_list *listp; + struct dtv_slotinfo_list *prevp; + size_t idx = l->l_tls_modid; + + _dl_debug_early("Adding to slotinfo for %s\n", l->l_name); + + /* Find the place in the dtv slotinfo list. */ + listp = _dl_tls_dtv_slotinfo_list; + prevp = NULL; /* Needed to shut up gcc. */ + do + { + /* Does it fit in the array of this list element? */ + if (idx < listp->len) + break; + idx -= listp->len; + prevp = listp; + listp = listp->next; + } + while (listp != NULL); + + if (listp == NULL) + { + /* When we come here it means we have to add a new element + to the slotinfo list. And the new module must be in + the first slot. */ + _dl_assert (idx == 0); + + listp = prevp->next = (struct dtv_slotinfo_list *) + _dl_malloc (sizeof (struct dtv_slotinfo_list) + + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo)); + if (listp == NULL) + { + /* We ran out of memory. We will simply fail this + call but don't undo anything we did so far. The + application will crash or be terminated anyway very + soon. */ + + /* We have to do this since some entries in the dtv + slotinfo array might already point to this + generation. */ + ++_dl_tls_generation; + + _dl_dprintf(2, "cannot create TLS data structures: ABORT\n"); + _dl_exit (127); + } + + listp->len = TLS_SLOTINFO_SURPLUS; + listp->next = NULL; + _dl_memset (listp->slotinfo, '\0', + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo)); + } + + /* Add the information into the slotinfo data structure. */ + listp->slotinfo[idx].map = l; + listp->slotinfo[idx].gen = _dl_tls_generation + 1; + /* ??? ideally this would be done once per call to dlopen. However there's + no easy way to indicate whether a library used TLS, so do it here + instead. */ + /* Bump the TLS generation number. */ + _dl_tls_generation++; +} + +/* Taken from glibc/elf/rtld.c */ +static bool tls_init_tp_called; + +/* _dl_error_catch_tsd points to this for the single-threaded case. + It's reset by the thread library for multithreaded programs. */ +void ** __attribute__ ((const)) +_dl_initial_error_catch_tsd (void) +{ + static +#if 0 /* def ARCH_NEEDS_BOOTSTRAP_RELOCS */ + /* If we have to do bootstrap relocs anyway we might as well */ + __thread +# endif + void *__tsd_data; + return &__tsd_data; +} + +#ifdef SHARED +void* +internal_function +init_tls (void); + +rtld_hidden_proto(init_tls) +void * +internal_function +init_tls (void) +{ + /* Number of elements in the static TLS block. 
*/ + _dl_tls_static_nelem = _dl_tls_max_dtv_idx; + + /* Do not do this twice. The audit interface might have required + the DTV interfaces to be set up early. */ + if (_dl_initial_dtv != NULL) + return NULL; + + /* Allocate the array which contains the information about the + dtv slots. We allocate a few entries more than needed to + avoid the need for reallocation. */ + size_t nelem = _dl_tls_max_dtv_idx + 1 + TLS_SLOTINFO_SURPLUS; + + /* Allocate. */ + _dl_assert (_dl_tls_dtv_slotinfo_list == NULL); + _dl_tls_dtv_slotinfo_list = (struct dtv_slotinfo_list *) + _dl_calloc (sizeof (struct dtv_slotinfo_list) + + nelem * sizeof (struct dtv_slotinfo), 1); + /* No need to check the return value. If memory allocation failed + the program would have been terminated. */ + + struct dtv_slotinfo *slotinfo = _dl_tls_dtv_slotinfo_list->slotinfo; + _dl_tls_dtv_slotinfo_list->len = nelem; + _dl_tls_dtv_slotinfo_list->next = NULL; + + /* Fill in the information from the loaded modules. No namespace + but the base one can be filled at this time. */ + int i = 0; + struct link_map *l; + for (l = (struct link_map *) _dl_loaded_modules; l != NULL; l = l->l_next) + if (l->l_tls_blocksize != 0) + { + /* This is a module with TLS data. Store the map reference. + The generation counter is zero. */ + + /* Skeep slot[0]: it will be never used */ + slotinfo[++i].map = l; + } + _dl_assert (i == _dl_tls_max_dtv_idx); + + /* Compute the TLS offsets for the various blocks. */ + _dl_determine_tlsoffset (); + + /* Construct the static TLS block and the dtv for the initial + thread. For some platforms this will include allocating memory + for the thread descriptor. The memory for the TLS block will + never be freed. It should be allocated accordingly. The dtv + array can be changed if dynamic loading requires it. */ + void *tcbp = _dl_allocate_tls_storage (); + if (tcbp == NULL) { + _dl_debug_early("\ncannot allocate TLS data structures for initial thread"); + _dl_exit(30); + } + + /* Store for detection of the special case by __tls_get_addr + so it knows not to pass this dtv to the normal realloc. */ + _dl_initial_dtv = GET_DTV (tcbp); + + /* And finally install it for the main thread. If ld.so itself uses + TLS we know the thread pointer was initialized earlier. */ + const char *lossage = (char *)TLS_INIT_TP (tcbp, USE___THREAD); + if(__builtin_expect (lossage != NULL, 0)) { + _dl_debug_early("cannot set up thread-local storage: %s\n", lossage); + _dl_exit(30); + } + tls_init_tp_called = true; + + return tcbp; +} +rtld_hidden_def (init_tls) +#endif + diff --git a/ldso/ldso/fdpic/dl-inlines.h b/ldso/ldso/fdpic/dl-inlines.h new file mode 100644 index 000000000..a9bfc9311 --- /dev/null +++ b/ldso/ldso/fdpic/dl-inlines.h @@ -0,0 +1,228 @@ +/* Copyright (C) 2003, 2004 Red Hat, Inc. + * Contributed by Alexandre Oliva <aoliva@redhat.com> + * Copyright (C) 2006-2011 Analog Devices, Inc. + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ + +#include <inline-hashtab.h> + +/* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete load map. 
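The fdpic/dl-inlines.h helpers that begin here build and query an elf32_fdpic_loadmap, the per-object table recording where each PT_LOAD segment actually landed. The FDPIC DL_RELOC_ADDR macro (defined in the dl-sysdep.h hunk further down) resolves link-time addresses through that table via __reloc_pointer. A self-contained sketch of the idea, using simplified stand-in structures rather than the real FDPIC ABI layouts:

#include <stdint.h>

/* Simplified stand-ins for the FDPIC load map types. */
struct loadseg { uintptr_t addr, p_vaddr, p_memsz; };
struct loadmap { unsigned short version, nsegs; struct loadseg segs[8]; };

/* Translate a link-time virtual address through the segment table,
 * in the spirit of __reloc_pointer()/DL_RELOC_ADDR on FDPIC targets. */
static void *reloc_pointer(uintptr_t vaddr, const struct loadmap *map)
{
	int i;

	for (i = 0; i < map->nsegs; i++) {
		const struct loadseg *s = &map->segs[i];
		if (vaddr >= s->p_vaddr && vaddr < s->p_vaddr + s->p_memsz)
			return (void *)(s->addr + (vaddr - s->p_vaddr));
	}
	return (void *)vaddr;	/* not covered: assume it is already absolute */
}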
*/ +static __always_inline void +__dl_init_loadaddr_map(struct elf32_fdpic_loadaddr *loadaddr, Elf32_Addr dl_boot_got_pointer, + struct elf32_fdpic_loadmap *map) +{ + if (map->version != 0) { + SEND_EARLY_STDERR("Invalid loadmap version number\n"); + _dl_exit(-1); + } + if (map->nsegs == 0) { + SEND_EARLY_STDERR("Invalid segment count in loadmap\n"); + _dl_exit(-1); + } + loadaddr->got_value = (void *)dl_boot_got_pointer; + loadaddr->map = map; +} + +/* + * Figure out how many LOAD segments there are in the given headers, + * and allocate a block for the load map big enough for them. + * got_value will be properly initialized later on, with INIT_GOT. + */ +static __always_inline int +__dl_init_loadaddr(struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt, + int pcnt) +{ + int count = 0, i; + size_t size; + + for (i = 0; i < pcnt; i++) + if (ppnt[i].p_type == PT_LOAD) + count++; + + loadaddr->got_value = 0; + + size = sizeof(struct elf32_fdpic_loadmap) + + (sizeof(struct elf32_fdpic_loadseg) * count); + loadaddr->map = _dl_malloc(size); + if (!loadaddr->map) + _dl_exit(-1); + + loadaddr->map->version = 0; + loadaddr->map->nsegs = 0; + + return count; +} + +/* Incrementally initialize a load map. */ +static __always_inline void +__dl_init_loadaddr_hdr(struct elf32_fdpic_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr, int maxsegs) +{ + struct elf32_fdpic_loadseg *segdata; + + if (loadaddr.map->nsegs == maxsegs) + _dl_exit(-1); + + segdata = &loadaddr.map->segs[loadaddr.map->nsegs++]; + segdata->addr = (Elf32_Addr)addr; + segdata->p_vaddr = phdr->p_vaddr; + segdata->p_memsz = phdr->p_memsz; + +#if defined(__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", + loadaddr.map->nsegs - 1, + segdata->p_vaddr, segdata->addr, segdata->p_memsz); +#endif +} + +/* Replace an existing entry in the load map. */ +static __always_inline void +__dl_update_loadaddr_hdr(struct elf32_fdpic_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr) +{ + struct elf32_fdpic_loadseg *segdata; + void *oldaddr; + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr && + loadaddr.map->segs[i].p_memsz == phdr->p_memsz) + break; + if (i == loadaddr.map->nsegs) + _dl_exit(-1); + + segdata = loadaddr.map->segs + i; + oldaddr = (void *)segdata->addr; + _dl_munmap(oldaddr, segdata->p_memsz); + segdata->addr = (Elf32_Addr)addr; + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n", + loadaddr.map->nsegs - 1, + segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz); +#endif +} + + +#ifndef __dl_loadaddr_unmap +static __always_inline void +__dl_loadaddr_unmap(struct elf32_fdpic_loadaddr loadaddr, + struct funcdesc_ht *funcdesc_ht) +{ + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + _dl_munmap((void *)loadaddr.map->segs[i].addr, + loadaddr.map->segs[i].p_memsz); + + /* + * _dl_unmap is only called for dlopen()ed libraries, for which + * calling free() is safe, or before we've completed the initial + * relocation, in which case calling free() is probably pointless, + * but still safe. + */ + _dl_free(loadaddr.map); + if (funcdesc_ht) + htab_delete(funcdesc_ht); +} +#endif + +/* Figure out whether the given address is in one of the mapped segments. 
*/ +static __always_inline int +__dl_addr_in_loadaddr(void *p, struct elf32_fdpic_loadaddr loadaddr) +{ + struct elf32_fdpic_loadmap *map = loadaddr.map; + int c; + + for (c = 0; c < map->nsegs; c++) + if ((void *)map->segs[c].addr <= p && + (char *)p < (char *)map->segs[c].addr + map->segs[c].p_memsz) + return 1; + + return 0; +} + +static int +hash_pointer(void *p) +{ + return (int) ((long)p >> 3); +} + +static int +eq_pointer(void *p, void *q) +{ + struct funcdesc_value *entry = p; + + return entry->entry_point == q; +} + +void * +_dl_funcdesc_for (void *entry_point, void *got_value) +{ + struct elf_resolve *tpnt = ((void**)got_value)[2]; + struct funcdesc_ht *ht = tpnt->funcdesc_ht; + struct funcdesc_value **entry; + + _dl_assert(got_value == tpnt->loadaddr.got_value); + + if (!ht) { + ht = htab_create(); + if (!ht) + return (void*)-1; + tpnt->funcdesc_ht = ht; + } + + entry = htab_find_slot(ht, entry_point, 1, hash_pointer, eq_pointer); + + if (entry == NULL) + _dl_exit(1); + + if (*entry) { + _dl_assert((*entry)->entry_point == entry_point); + return _dl_stabilize_funcdesc(*entry); + } + + *entry = _dl_malloc(sizeof(**entry)); + (*entry)->entry_point = entry_point; + (*entry)->got_value = got_value; + + return _dl_stabilize_funcdesc(*entry); +} + +static __always_inline void const * +_dl_lookup_address(void const *address) +{ + struct elf_resolve *rpnt; + struct funcdesc_value const *fd; + + /* Make sure we don't make assumptions about its alignment. */ + __asm__ ("" : "+r" (address)); + + if ((Elf32_Addr)address & 7) + /* It's not a function descriptor. */ + return address; + + fd = address; + + for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next) { + if (!rpnt->funcdesc_ht) + continue; + + if (fd->got_value != rpnt->loadaddr.got_value) + continue; + + address = htab_find_slot(rpnt->funcdesc_ht, (void *)fd->entry_point, 0, + hash_pointer, eq_pointer); + + if (address && *(struct funcdesc_value *const*)address == fd) { + address = (*(struct funcdesc_value *const*)address)->entry_point; + break; + } else + address = fd; + } + + return address; +} diff --git a/ldso/ldso/fdpic/dl-sysdep.h b/ldso/ldso/fdpic/dl-sysdep.h new file mode 100644 index 000000000..546811ad0 --- /dev/null +++ b/ldso/ldso/fdpic/dl-sysdep.h @@ -0,0 +1,136 @@ +/* Copyright (C) 2003, 2004 Red Hat, Inc. + * Contributed by Alexandre Oliva <aoliva@redhat.com> + * Copyright (C) 2006-2011 Analog Devices, Inc. + * Based on ../i386/dl-sysdep.h + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ + +#define HAVE_DL_INLINES_H + +/* + * Initialization sequence for a GOT. Copy the resolver function + * descriptor and the pointer to the elf_resolve/link_map data + * structure. Initialize the got_value in the module while at that. 
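_dl_funcdesc_for() above hands dlsym() callers a two-word descriptor instead of a raw code address, and _dl_lookup_address() maps such a descriptor back to its entry point for dladdr(). The plain-C illustration below mirrors the shape of struct funcdesc_value defined a few lines further down and shows the extra level of indirection a caller goes through; it cannot reproduce the real ABI step of loading got_value into the PIC register, so treat it only as a model.

#include <stdio.h>

/* An FDPIC "function pointer" is the address of a descriptor holding the
 * code entry point plus the GOT pointer the callee expects. */
struct funcdesc {
	void *entry_point;
	void *got_value;
};

static void greet(void)
{
	puts("hello from greet()");
}

static void *dummy_got[3];	/* stands in for the library's GOT */
static struct funcdesc greet_desc = { (void *)greet, dummy_got };

int main(void)
{
	/* A real FDPIC call site would load greet_desc.got_value into the
	 * PIC register before branching; here we only demonstrate the
	 * descriptor indirection itself. */
	void (*fn)(void) = (void (*)(void))greet_desc.entry_point;

	fn();
	return 0;
}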
+ */ +#define INIT_GOT(GOT_BASE,MODULE) \ +{ \ + (MODULE)->loadaddr.got_value = (GOT_BASE); \ + GOT_BASE[0] = ((unsigned long *)&_dl_linux_resolve)[0]; \ + GOT_BASE[1] = ((unsigned long *)&_dl_linux_resolve)[1]; \ + GOT_BASE[2] = (unsigned long) MODULE; \ +} + +struct elf_resolve; + +struct funcdesc_value +{ + void *entry_point; + void *got_value; +} __attribute__((__aligned__(8))); + +struct funcdesc_ht; + +#define DL_LOADADDR_TYPE struct elf32_fdpic_loadaddr + +#define DL_RELOC_ADDR(LOADADDR, ADDR) \ + ((ElfW(Addr))__reloc_pointer ((void*)(ADDR), (LOADADDR).map)) + +#define DL_ADDR_TO_FUNC_PTR(ADDR, LOADADDR) \ + ((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value)) + +#define _dl_stabilize_funcdesc(val) \ + ({ __asm__ ("" : "+m" (*(val))); (val); }) + +#define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) \ + ({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \ + void (*pf)(void) = (void*) _dl_stabilize_funcdesc (&fd); \ + (* SIGNATURE pf)(__VA_ARGS__); }) + +#define DL_INIT_LOADADDR_BOOT(LOADADDR, BASEADDR) \ + (__dl_init_loadaddr_map (&(LOADADDR), dl_boot_got_pointer, \ + dl_boot_ldsomap ?: dl_boot_progmap)) + +#define DL_INIT_LOADADDR_PROG(LOADADDR, BASEADDR) \ + (__dl_init_loadaddr_map (&(LOADADDR), 0, dl_boot_progmap)) + +#define DL_INIT_LOADADDR_EXTRA_DECLS \ + int dl_init_loadaddr_load_count; +#define DL_INIT_LOADADDR(LOADADDR, BASEADDR, PHDR, PHDRCNT) \ + (dl_init_loadaddr_load_count = \ + __dl_init_loadaddr (&(LOADADDR), (PHDR), (PHDRCNT))) +#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ + dl_init_loadaddr_load_count)) +#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR))) +#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ + (__dl_loadaddr_unmap ((LOADADDR), (NULL))) +#define DL_LIB_UNMAP(LIB, LEN) \ + (__dl_loadaddr_unmap ((LIB)->loadaddr, (LIB)->funcdesc_ht)) +#define DL_LOADADDR_BASE(LOADADDR) \ + ((LOADADDR).got_value) + +/* This is called from dladdr(), such that we map a function + descriptor's address to the function's entry point before trying to + find in which library it's defined. */ +#define DL_LOOKUP_ADDRESS(ADDRESS) (_dl_lookup_address (ADDRESS)) + +#define DL_ADDR_IN_LOADADDR(ADDR, TPNT, TFROM) \ + (! (TFROM) && __dl_addr_in_loadaddr ((void*)(ADDR), (TPNT)->loadaddr)) + +/* + * Compute the GOT address. On several platforms, we use assembly + * here. on FDPIC, there's no way to compute the GOT address, + * since the offset between text and data is not fixed, so we arrange + * for the ldso assembly entry point to pass this value as an argument + * to _dl_start. */ +#define DL_BOOT_COMPUTE_GOT(got) ((got) = dl_boot_got_pointer) + +#define DL_BOOT_COMPUTE_DYN(dpnt, got, load_addr) \ + ((dpnt) = dl_boot_ldso_dyn_pointer) + +/* We want want to apply all relocations in the interpreter during + bootstrap. Because of this, we have to skip the interpreter + relocations in _dl_parse_relocation_information(), see + elfinterp.c. */ +#define DL_SKIP_BOOTSTRAP_RELOC(SYMTAB, INDEX, STRTAB) 0 + +#ifdef __NR_pread +#define _DL_PREAD(FD, BUF, SIZE, OFFSET) \ + (_dl_pread((FD), (BUF), (SIZE), (OFFSET))) +#endif + +/* We want to return to dlsym() a function descriptor if the symbol + turns out to be a function. */ +#define DL_FIND_HASH_VALUE(TPNT, TYPE_CLASS, SYM) \ + (((TYPE_CLASS) & ELF_RTYPE_CLASS_DLSYM) \ + && ELF32_ST_TYPE((SYM)->st_info) == STT_FUNC \ + ? 
_dl_funcdesc_for ((void *)DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value), \ + (TPNT)->loadaddr.got_value) \ + : DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value)) + +#define DL_GET_READY_TO_RUN_EXTRA_PARMS \ + , struct elf32_fdpic_loadmap *dl_boot_progmap, Elf32_Addr dl_boot_got_pointer +#define DL_GET_READY_TO_RUN_EXTRA_ARGS \ + , dl_boot_progmap, dl_boot_got_pointer + +/* Define this to declare the library offset. */ +#define DL_DEF_LIB_OFFSET + +/* Define this to get the library offset. */ +#define DL_GET_LIB_OFFSET() 0 + +/* Define this to set the library offset. */ +#define DL_SET_LIB_OFFSET(offset) + +/* Define this to get the real object's runtime address. */ +#define DL_GET_RUN_ADDR(loadaddr, mapaddr) (loadaddr) + +#ifdef __USE_GNU +# include <link.h> +#else +# define __USE_GNU +# include <link.h> +# undef __USE_GNU +#endif diff --git a/ldso/ldso/frv/dl-debug.h b/ldso/ldso/frv/dl-debug.h index 9b146a62f..65c2386fb 100644 --- a/ldso/ldso/frv/dl-debug.h +++ b/ldso/ldso/frv/dl-debug.h @@ -10,7 +10,7 @@ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. */ -static const char *_dl_reltypes_tab[] = +static const char * const _dl_reltypes_tab[] = { [0] "R_FRV_NONE", "R_FRV_32", [2] "R_FRV_LABEL16", "R_FRV_LABEL24", diff --git a/ldso/ldso/frv/dl-inlines.h b/ldso/ldso/frv/dl-inlines.h index 0d469dd88..8fdf6eb48 100644 --- a/ldso/ldso/frv/dl-inlines.h +++ b/ldso/ldso/frv/dl-inlines.h @@ -1,456 +1 @@ -/* Copyright (C) 2003, 2004 Red Hat, Inc. - * Contributed by Alexandre Oliva <aoliva@redhat.com> - * - * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. - */ - -#ifndef _dl_assert -# define _dl_assert(expr) -#endif - -/* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete - load map. */ -inline static void -__dl_init_loadaddr_map (struct elf32_fdpic_loadaddr *loadaddr, void *got_value, - struct elf32_fdpic_loadmap *map) -{ - if (map->version != 0) - { - SEND_EARLY_STDERR ("Invalid loadmap version number\n"); - _dl_exit(-1); - } - if (map->nsegs == 0) - { - SEND_EARLY_STDERR ("Invalid segment count in loadmap\n"); - _dl_exit(-1); - } - loadaddr->got_value = got_value; - loadaddr->map = map; -} - -/* Figure out how many LOAD segments there are in the given headers, - and allocate a block for the load map big enough for them. - got_value will be properly initialized later on, with INIT_GOT. */ -inline static int -__dl_init_loadaddr (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt, - int pcnt) -{ - int count = 0, i; - size_t size; - - for (i = 0; i < pcnt; i++) - if (ppnt[i].p_type == PT_LOAD) - count++; - - loadaddr->got_value = 0; - - size = sizeof (struct elf32_fdpic_loadmap) - + sizeof (struct elf32_fdpic_loadseg) * count; - loadaddr->map = _dl_malloc (size); - if (! loadaddr->map) - _dl_exit (-1); - - loadaddr->map->version = 0; - loadaddr->map->nsegs = 0; - - return count; -} - -/* Incrementally initialize a load map. 
*/ -inline static void -__dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, - Elf32_Phdr *phdr, int maxsegs) -{ - struct elf32_fdpic_loadseg *segdata; - - if (loadaddr.map->nsegs == maxsegs) - _dl_exit (-1); - - segdata = &loadaddr.map->segs[loadaddr.map->nsegs++]; - segdata->addr = (Elf32_Addr) addr; - segdata->p_vaddr = phdr->p_vaddr; - segdata->p_memsz = phdr->p_memsz; - -#if defined (__SUPPORT_LD_DEBUG__) - { - extern char *_dl_debug; - extern int _dl_debug_file; - if (_dl_debug) - _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", - loadaddr.map->nsegs-1, - segdata->p_vaddr, segdata->addr, segdata->p_memsz); - } -#endif -} - -inline static void __dl_loadaddr_unmap -(struct elf32_fdpic_loadaddr loadaddr, struct funcdesc_ht *funcdesc_ht); - -/* Figure out whether the given address is in one of the mapped - segments. */ -inline static int -__dl_addr_in_loadaddr (void *p, struct elf32_fdpic_loadaddr loadaddr) -{ - struct elf32_fdpic_loadmap *map = loadaddr.map; - int c; - - for (c = 0; c < map->nsegs; c++) - if ((void*)map->segs[c].addr <= p - && (char*)p < (char*)map->segs[c].addr + map->segs[c].p_memsz) - return 1; - - return 0; -} - -inline static void * _dl_funcdesc_for (void *entry_point, void *got_value); - -/* The hashcode handling code below is heavily inspired in libiberty's - hashtab code, but with most adaptation points and support for - deleting elements removed. - - Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. - Contributed by Vladimir Makarov (vmakarov@cygnus.com). */ - -inline static unsigned long -higher_prime_number (unsigned long n) -{ - /* These are primes that are near, but slightly smaller than, a - power of two. */ - static const unsigned long primes[] = { - (unsigned long) 7, - (unsigned long) 13, - (unsigned long) 31, - (unsigned long) 61, - (unsigned long) 127, - (unsigned long) 251, - (unsigned long) 509, - (unsigned long) 1021, - (unsigned long) 2039, - (unsigned long) 4093, - (unsigned long) 8191, - (unsigned long) 16381, - (unsigned long) 32749, - (unsigned long) 65521, - (unsigned long) 131071, - (unsigned long) 262139, - (unsigned long) 524287, - (unsigned long) 1048573, - (unsigned long) 2097143, - (unsigned long) 4194301, - (unsigned long) 8388593, - (unsigned long) 16777213, - (unsigned long) 33554393, - (unsigned long) 67108859, - (unsigned long) 134217689, - (unsigned long) 268435399, - (unsigned long) 536870909, - (unsigned long) 1073741789, - (unsigned long) 2147483647, - /* 4294967291L */ - ((unsigned long) 2147483647) + ((unsigned long) 2147483644), - }; - - const unsigned long *low = &primes[0]; - const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])]; - - while (low != high) - { - const unsigned long *mid = low + (high - low) / 2; - if (n > *mid) - low = mid + 1; - else - high = mid; - } - -#if 0 - /* If we've run out of primes, abort. */ - if (n > *low) - { - fprintf (stderr, "Cannot find prime bigger than %lu\n", n); - abort (); - } -#endif - - return *low; -} - -struct funcdesc_ht -{ - /* Table itself. */ - struct funcdesc_value **entries; - - /* Current size (in entries) of the hash table */ - size_t size; - - /* Current number of elements. */ - size_t n_elements; -}; - -inline static int -hash_pointer (const void *p) -{ - return (int) ((long)p >> 3); -} - -inline static struct funcdesc_ht * -htab_create (void) -{ - struct funcdesc_ht *ht = _dl_malloc (sizeof (struct funcdesc_ht)); - - if (! 
ht) - return NULL; - ht->size = 3; - ht->entries = _dl_malloc (sizeof (struct funcdesc_ht_value *) * ht->size); - if (! ht->entries) - return NULL; - - ht->n_elements = 0; - - _dl_memset (ht->entries, 0, sizeof (struct funcdesc_ht_value *) * ht->size); - - return ht; -} - -/* This is only called from _dl_loadaddr_unmap, so it's safe to call - _dl_free(). See the discussion below. */ -inline static void -htab_delete (struct funcdesc_ht *htab) -{ - int i; - - for (i = htab->size - 1; i >= 0; i--) - if (htab->entries[i]) - _dl_free (htab->entries[i]); - - _dl_free (htab->entries); - _dl_free (htab); -} - -/* Similar to htab_find_slot, but without several unwanted side effects: - - Does not call htab->eq_f when it finds an existing entry. - - Does not change the count of elements/searches/collisions in the - hash table. - This function also assumes there are no deleted entries in the table. - HASH is the hash value for the element to be inserted. */ - -inline static struct funcdesc_value ** -find_empty_slot_for_expand (struct funcdesc_ht *htab, int hash) -{ - size_t size = htab->size; - unsigned int index = hash % size; - struct funcdesc_value **slot = htab->entries + index; - int hash2; - - if (! *slot) - return slot; - - hash2 = 1 + hash % (size - 2); - for (;;) - { - index += hash2; - if (index >= size) - index -= size; - - slot = htab->entries + index; - if (! *slot) - return slot; - } -} - -/* The following function changes size of memory allocated for the - entries and repeatedly inserts the table elements. The occupancy - of the table after the call will be about 50%. Naturally the hash - table must already exist. Remember also that the place of the - table entries is changed. If memory allocation failures are allowed, - this function will return zero, indicating that the table could not be - expanded. If all goes well, it will return a non-zero value. */ - -inline static int -htab_expand (struct funcdesc_ht *htab) -{ - struct funcdesc_value **oentries; - struct funcdesc_value **olimit; - struct funcdesc_value **p; - struct funcdesc_value **nentries; - size_t nsize; - - oentries = htab->entries; - olimit = oentries + htab->size; - - /* Resize only when table after removal of unused elements is either - too full or too empty. */ - if (htab->n_elements * 2 > htab->size) - nsize = higher_prime_number (htab->n_elements * 2); - else - nsize = htab->size; - - nentries = _dl_malloc (sizeof (struct funcdesc_value *) * nsize); - _dl_memset (nentries, 0, sizeof (struct funcdesc_value *) * nsize); - if (nentries == NULL) - return 0; - htab->entries = nentries; - htab->size = nsize; - - p = oentries; - do - { - if (*p) - *find_empty_slot_for_expand (htab, hash_pointer ((*p)->entry_point)) - = *p; - - p++; - } - while (p < olimit); - -#if 0 /* We can't tell whether this was allocated by the _dl_malloc() - built into ld.so or malloc() in the main executable or libc, - and calling free() for something that wasn't malloc()ed could - do Very Bad Things (TM). Take the conservative approach - here, potentially wasting as much memory as actually used by - the hash table, even if multiple growths occur. That's not - so bad as to require some overengineered solution that would - enable us to keep track of how it was allocated. */ - _dl_free (oentries); -#endif - return 1; -} - -/* This function searches for a hash table slot containing an entry - equal to the given element. To delete an entry, call this with - INSERT = 0, then call htab_clear_slot on the slot returned (possibly - after doing some checks). 
To insert an entry, call this with - INSERT = 1, then write the value you want into the returned slot. - When inserting an entry, NULL may be returned if memory allocation - fails. */ - -inline static struct funcdesc_value ** -htab_find_slot (struct funcdesc_ht *htab, void *ptr, int insert) -{ - unsigned int index; - int hash, hash2; - size_t size; - struct funcdesc_value **entry; - - if (htab->size * 3 <= htab->n_elements * 4 - && htab_expand (htab) == 0) - return NULL; - - hash = hash_pointer (ptr); - - size = htab->size; - index = hash % size; - - entry = &htab->entries[index]; - if (!*entry) - goto empty_entry; - else if ((*entry)->entry_point == ptr) - return entry; - - hash2 = 1 + hash % (size - 2); - for (;;) - { - index += hash2; - if (index >= size) - index -= size; - - entry = &htab->entries[index]; - if (!*entry) - goto empty_entry; - else if ((*entry)->entry_point == ptr) - return entry; - } - - empty_entry: - if (!insert) - return NULL; - - htab->n_elements++; - return entry; -} - -void * -_dl_funcdesc_for (void *entry_point, void *got_value) -{ - struct elf_resolve *tpnt = ((void**)got_value)[2]; - struct funcdesc_ht *ht = tpnt->funcdesc_ht; - struct funcdesc_value **entry; - - _dl_assert (got_value == tpnt->loadaddr.got_value); - - if (! ht) - { - ht = htab_create (); - if (! ht) - return (void*)-1; - tpnt->funcdesc_ht = ht; - } - - entry = htab_find_slot (ht, entry_point, 1); - if (*entry) - { - _dl_assert ((*entry)->entry_point == entry_point); - return _dl_stabilize_funcdesc (*entry); - } - - *entry = _dl_malloc (sizeof (struct funcdesc_value)); - (*entry)->entry_point = entry_point; - (*entry)->got_value = got_value; - - return _dl_stabilize_funcdesc (*entry); -} - -inline static void const * -_dl_lookup_address (void const *address) -{ - struct elf_resolve *rpnt; - struct funcdesc_value const *fd; - - /* Make sure we don't make assumptions about its alignment. */ - __asm__ ("" : "+r" (address)); - - if ((Elf32_Addr)address & 7) - /* It's not a function descriptor. */ - return address; - - fd = (struct funcdesc_value const *)address; - - for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next) - { - if (! rpnt->funcdesc_ht) - continue; - - if (fd->got_value != rpnt->loadaddr.got_value) - continue; - - address = htab_find_slot (rpnt->funcdesc_ht, (void*)fd->entry_point, 0); - - if (address && *(struct funcdesc_value *const*)address == fd) - { - address = (*(struct funcdesc_value *const*)address)->entry_point; - break; - } - else - address = fd; - } - - return address; -} - -void -__dl_loadaddr_unmap (struct elf32_fdpic_loadaddr loadaddr, - struct funcdesc_ht *funcdesc_ht) -{ - int i; - - for (i = 0; i < loadaddr.map->nsegs; i++) - _dl_munmap ((void*)loadaddr.map->segs[i].addr, - loadaddr.map->segs[i].p_memsz); - - /* _dl_unmap is only called for dlopen()ed libraries, for which - calling free() is safe, or before we've completed the initial - relocation, in which case calling free() is probably pointless, - but still safe. 
*/ - _dl_free (loadaddr.map); - if (funcdesc_ht) - htab_delete (funcdesc_ht); -} +#include "../fdpic/dl-inlines.h" diff --git a/ldso/ldso/frv/dl-startup.h b/ldso/ldso/frv/dl-startup.h index 674f81c15..45e9cb9ce 100644 --- a/ldso/ldso/frv/dl-startup.h +++ b/ldso/ldso/frv/dl-startup.h @@ -27,9 +27,10 @@ __asm__("" \ " .text\n" \ -" .global _dl_boot\n" \ -" .type _dl_boot,@function\n" \ -"_dl_boot:\n" \ +" .global _start\n" \ +" .type _start,@function\n" \ +" .hidden _start\n" \ +"_start:\n" \ " call .Lcall\n" \ ".Lcall:\n" \ " movsg lr, gr4\n" \ @@ -54,45 +55,32 @@ __asm__("" \ " addi.p sp, #4, gr13\n" \ " addi sp, #-8, sp\n" \ " mov.p sp, gr12\n" \ -" call _dl_boot2\n" \ +" call _dl_start\n" \ " ldd.p @(sp, gr0), gr14\n" \ " addi sp, #8, sp\n" \ " movgs gr0, lr\n" \ " jmpl @(gr14, gr0)\n" \ -" .size _dl_boot,.-_dl_boot\n" \ +" .size _start,.-_start\n" \ ); -#define _dl_boot _dl_boot2 -#define DL_BOOT(X) \ +#undef DL_START +#define DL_START(X) \ static void __attribute__ ((used)) \ -_dl_boot (void *dl_boot_got_pointer, \ +_dl_start (Elf32_Addr dl_boot_got_pointer, \ struct elf32_fdpic_loadmap *dl_boot_progmap, \ struct elf32_fdpic_loadmap *dl_boot_ldsomap, \ Elf32_Dyn *dl_boot_ldso_dyn_pointer, \ struct funcdesc_value *dl_main_funcdesc, \ X) -struct elf32_fdpic_loadmap; - /* * Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = ((unsigned long*) ARGS) /* - * Compute the GOT address. On several platforms, we use assembly - * here. on FR-V FDPIC, there's no way to compute the GOT address, - * since the offset between text and data is not fixed, so we arrange - * for the assembly _dl_boot to pass this value as an argument to - * _dl_boot. */ -#define DL_BOOT_COMPUTE_GOT(got) ((got) = dl_boot_got_pointer) - -#define DL_BOOT_COMPUTE_DYN(dpnt, got, load_addr) \ - ((dpnt) = dl_boot_ldso_dyn_pointer) - -/* * Here is a macro to perform a relocation. This is only used when * bootstrapping the dynamic loader. RELP is the relocation that we * are performing, REL is the pointer to the address we are relocating. @@ -100,7 +88,7 @@ struct elf32_fdpic_loadmap; * load address. */ #define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \ - switch(ELF32_R_TYPE((RELP)->r_info)){ \ + switch(ELF_R_TYPE((RELP)->r_info)){ \ case R_FRV_32: \ *(REL) += (SYMBOL); \ break; \ @@ -128,6 +116,5 @@ struct elf32_fdpic_loadmap; while (exec_mod->libtype != elf_executable) \ exec_mod = exec_mod->next; \ dl_main_funcdesc->got_value = exec_mod->loadaddr.got_value; \ - /* _dl_dprintf(2, "entry point is (%x,%x)\n", dl_main_funcdesc->entry_point, dl_main_funcdesc->got_value); */ \ return; \ } while (0) diff --git a/ldso/ldso/frv/dl-syscalls.h b/ldso/ldso/frv/dl-syscalls.h index 093d0dca8..f40c4fd31 100644 --- a/ldso/ldso/frv/dl-syscalls.h +++ b/ldso/ldso/frv/dl-syscalls.h @@ -1,175 +1 @@ -/* Copyright (C) 2003, 2004 Red Hat, Inc. - * Contributed by Alexandre Oliva <aoliva@redhat.com> - * - * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. - */ - -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. 
*/ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} -#include <sys/mman.h> - -/* The code below is extracted from libc/sysdeps/linux/frv/_mmap.c */ - -#if DYNAMIC_LOADER_IN_SIMULATOR -#define __NR___syscall_mmap2 __NR_mmap2 -static __inline__ _syscall6(__ptr_t, __syscall_mmap2, __ptr_t, addr, - size_t, len, int, prot, int, flags, int, fd, off_t, offset); - -/* Make sure we don't get another definition of _dl_mmap from the - machine-independent code. */ -#undef __NR_mmap -#undef __NR_mmap2 - -/* This is always 12, even on architectures where PAGE_SHIFT != 12. */ -# ifndef MMAP2_PAGE_SHIFT -# define MMAP2_PAGE_SHIFT 12 -# endif - -#include <bits/uClibc_page.h> /* for PAGE_SIZE */ -inline static void *_dl_memset(void*,int,size_t); -inline static ssize_t _dl_pread(int fd, void *buf, size_t count, off_t offset); - -static __ptr_t -_dl_mmap(__ptr_t addr, size_t len, int prot, int flags, int fd, __off_t offset) -{ - size_t plen = (len + PAGE_SIZE - 1) & -PAGE_SIZE; - -/* This is a hack to enable the dynamic loader to run within a - simulator that doesn't support mmap, with a number of very ugly - tricks. Also, it's not as useful as it sounds, since only dynamic - executables without DT_NEEDED dependencies can be run. AFAIK, they - can only be created with -pie. This trick suffices to enable the - dynamic loader to obtain a blank page that it maps early in the - bootstrap. */ - if ((flags & MAP_FIXED) == 0) - { - void *_dl_mmap_base = 0; - __ptr_t *ret = 0; - - if (! _dl_mmap_base) - { - void *stack; - __asm__ ("mov sp, %0" : "=r" (stack)); - _dl_mmap_base = (void *)(((long)stack + 2 * PAGE_SIZE) & -PAGE_SIZE); - retry: - if (((void **)_dl_mmap_base)[0] == _dl_mmap_base - && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base - && (((void **)_dl_mmap_base)[177] - == ((void **)_dl_mmap_base)[771])) - { - while (((void**)_dl_mmap_base)[177]) - { - _dl_mmap_base = ((void**)_dl_mmap_base)[177]; - if (!(((void **)_dl_mmap_base)[0] == _dl_mmap_base - && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base - && (((void **)_dl_mmap_base)[177] - == ((void**)_dl_mmap_base)[771]))) - ((void(*)())0)(); - } - } - else - { - int i; - for (i = 0; i < (int)PAGE_SIZE; i++) - if (*(char*)(_dl_mmap_base + i)) - break; - if (i != PAGE_SIZE) - { - _dl_mmap_base = (void*)((long)_dl_mmap_base + PAGE_SIZE); - goto retry; - } - ((void**)_dl_mmap_base)[-1] = - ((void**)_dl_mmap_base)[0] = - ((void**)_dl_mmap_base)[1023] = - _dl_mmap_base; - } - } - - if (_dl_mmap_base) - { - if (!(((void **)_dl_mmap_base)[0] == _dl_mmap_base - && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base - && (((void **)_dl_mmap_base)[177] - == ((void**)_dl_mmap_base)[771]))) - ((void(*)())0)(); - ret = (__ptr_t)((char*)_dl_mmap_base + PAGE_SIZE); - _dl_mmap_base = - ((void**)_dl_mmap_base)[177] = - ((void**)_dl_mmap_base)[771] = - (char*)_dl_mmap_base + plen + PAGE_SIZE; - ((void**)_dl_mmap_base)[0] = - ((void**)_dl_mmap_base)[1023] = - _dl_mmap_base; - } - - if ((flags & MAP_ANONYMOUS) != 0) - { - _dl_memset (ret, 0, plen); - return ret; - } - - flags |= MAP_FIXED; - addr = ret; - } - if (offset & ((1 << MMAP2_PAGE_SHIFT) - 1)) { -#if 0 - __set_errno (EINVAL); -#endif - return MAP_FAILED; - } - if ((flags & MAP_FIXED) != 0) - { - if (_dl_pread(fd, addr, len, offset) != (ssize_t)len) - return (void*)MAP_FAILED; - if (plen != len) - _dl_memset (addr + len, 0, plen - len); - return addr; - } - return(__syscall_mmap2(addr, len, prot, flags, fd, (off_t) (offset >> MMAP2_PAGE_SHIFT))); -} -#endif - 
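The simulator fallback removed above has to cope with a kernel detail that the old comment calls out: for this port the mmap2 syscall takes its file offset in fixed 4096-byte units, which is why the byte offset is shifted by MMAP2_PAGE_SHIFT (12) rather than by the architecture's page shift. A minimal standalone illustration of that conversion follows; it is editorial, not part of the patch, the helper name is made up, and it assumes a 32-bit target that provides __NR_mmap2:

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define MMAP2_UNIT_SHIFT 12	/* assumed: mmap2 offsets counted in 4096-byte units */

/* Map a file given a byte offset; mmap2 only accepts offsets that are
   a multiple of its unit, so reject anything unaligned up front. */
static void *mmap2_by_byte_offset(void *addr, size_t len, int prot,
				  int flags, int fd, unsigned long byte_off)
{
	if (byte_off & ((1UL << MMAP2_UNIT_SHIFT) - 1))
		return MAP_FAILED;
	return (void *) syscall(__NR_mmap2, addr, len, prot, flags, fd,
				byte_off >> MMAP2_UNIT_SHIFT);
}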
-#ifdef __NR_pread -#ifdef DYNAMIC_LOADER_IN_SIMULATOR -#include <unistd.h> - -#define __NR___syscall_lseek __NR_lseek -inline static unsigned long _dl_read(int fd, const void *buf, unsigned long count); - -inline static _syscall3(__off_t, __syscall_lseek, int, fd, __off_t, offset, - int, whence); -inline static ssize_t -_dl_pread(int fd, void *buf, size_t count, off_t offset) -{ - __off_t orig = __syscall_lseek (fd, 0, SEEK_CUR); - ssize_t ret; - - if (orig == -1) - return -1; - - if (__syscall_lseek (fd, offset, SEEK_SET) != offset) - return -1; - - ret = _dl_read (fd, buf, count); - - if (__syscall_lseek (fd, orig, SEEK_SET) != orig) - ((void(*)())0)(); - - return ret; -} -#else -#define __NR___syscall_pread __NR_pread -inline static _syscall5(ssize_t, __syscall_pread, int, fd, void *, buf, - size_t, count, off_t, offset_hi, off_t, offset_lo); - -inline static ssize_t -_dl_pread(int fd, void *buf, size_t count, off_t offset) -{ - return(__syscall_pread(fd,buf,count,__LONG_LONG_PAIR (offset >> 31, offset))); -} -#endif -#endif +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/frv/dl-sysdep.h b/ldso/ldso/frv/dl-sysdep.h index 32d86ed2c..e6bf7f187 100644 --- a/ldso/ldso/frv/dl-sysdep.h +++ b/ldso/ldso/frv/dl-sysdep.h @@ -20,19 +20,6 @@ #define DL_NO_COPY_RELOCS -/* - * Initialization sequence for a GOT. Copy the resolver function - * descriptor and the pointer to the elf_resolve/link_map data - * structure. Initialize the got_value in the module while at that. - */ -#define INIT_GOT(GOT_BASE,MODULE) \ -{ \ - (MODULE)->loadaddr.got_value = (GOT_BASE); \ - GOT_BASE[0] = ((unsigned long *)&_dl_linux_resolve)[0]; \ - GOT_BASE[1] = ((unsigned long *)&_dl_linux_resolve)[1]; \ - GOT_BASE[2] = (unsigned long) MODULE; \ -} - /* Here we define the magic numbers that this dynamic loader should accept */ #define MAGIC1 EM_CYGNUS_FRV #undef MAGIC2 @@ -40,25 +27,11 @@ /* Used for error messages */ #define ELF_TARGET "FR-V" -struct elf_resolve; - -struct funcdesc_value -{ - void *entry_point; - void *got_value; -} __attribute__((__aligned__(8))); - +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS extern int _dl_linux_resolve(void) __attribute__((__visibility__("hidden"))); -/* 16KiB page alignment. Should perhaps be made dynamic using - getpagesize(), based on AT_PAGESZ from auxvt? */ -#define PAGE_ALIGN 0xffffc000 -#define ADDR_ALIGN 0x3fff -#define OFFS_ALIGN 0x7fffc000 - -struct funcdesc_ht; - /* We must force strings used early in the bootstrap into the data segment, such that they are referenced with GOTOFF instead of GPREL, because GPREL needs the GOT to have already been @@ -67,52 +40,6 @@ struct funcdesc_ht; #define SEND_EARLY_STDERR(S) \ do { static char __s[] = (S); SEND_STDERR (__s); } while (0) -#define DL_LOADADDR_TYPE struct elf32_fdpic_loadaddr - -#define DL_RELOC_ADDR(ADDR, LOADADDR) \ - (__reloc_pointer ((void*)(ADDR), (LOADADDR).map)) - -#define DL_ADDR_TO_FUNC_PTR(ADDR, LOADADDR) \ - ((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value)) - -#define _dl_stabilize_funcdesc(val) \ - ({ __asm__ ("" : "+m" (*(val))); (val); }) - -#define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) 
\ - ({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \ - void (*pf)(void) = (void*) _dl_stabilize_funcdesc (&fd); \ - (* SIGNATURE pf)(__VA_ARGS__); }) - -#define DL_INIT_LOADADDR_BOOT(LOADADDR, BASEADDR) \ - (__dl_init_loadaddr_map (&(LOADADDR), dl_boot_got_pointer, \ - dl_boot_ldsomap ?: dl_boot_progmap)) - -#define DL_INIT_LOADADDR_PROG(LOADADDR, BASEADDR) \ - (__dl_init_loadaddr_map (&(LOADADDR), 0, dl_boot_progmap)) - -#define DL_INIT_LOADADDR_EXTRA_DECLS \ - int dl_init_loadaddr_load_count; -#define DL_INIT_LOADADDR(LOADADDR, BASEADDR, PHDR, PHDRCNT) \ - (dl_init_loadaddr_load_count = \ - __dl_init_loadaddr (&(LOADADDR), (PHDR), (PHDRCNT))) -#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ - (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ - dl_init_loadaddr_load_count)) -#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ - (__dl_loadaddr_unmap ((LOADADDR), (NULL))) -#define DL_LIB_UNMAP(LIB, LEN) \ - (__dl_loadaddr_unmap ((LIB)->loadaddr, (LIB)->funcdesc_ht)) -#define DL_LOADADDR_BASE(LOADADDR) \ - ((LOADADDR).got_value) - -/* This is called from dladdr(), such that we map a function - descriptor's address to the function's entry point before trying to - find in which library it's defined. */ -#define DL_LOOKUP_ADDRESS(ADDRESS) (_dl_lookup_address (ADDRESS)) - -#define DL_ADDR_IN_LOADADDR(ADDR, TPNT, TFROM) \ - (! (TFROM) && __dl_addr_in_loadaddr ((void*)(ADDR), (TPNT)->loadaddr)) - /* Make sure we only load libraries that use the same number of general-purpose and floating-point registers the dynamic loader was compiled for. */ @@ -154,38 +81,17 @@ do \ } \ while (0) -/* We want want to apply all relocations in the interpreter during - bootstrap. Because of this, we have to skip the interpreter - relocations in _dl_parse_relocation_information(), see - elfinterp.c. */ -#define DL_SKIP_BOOTSTRAP_RELOC(SYMTAB, INDEX, STRTAB) 0 - -#ifdef __NR_pread -#define _DL_PREAD(FD, BUF, SIZE, OFFSET) \ - (_dl_pread((FD), (BUF), (SIZE), (OFFSET))) -#endif - -/* We want to return to dlsym() a function descriptor if the symbol - turns out to be a function. */ -#define DL_FIND_HASH_VALUE(TPNT, TYPE_CLASS, SYM) \ - (((TYPE_CLASS) & ELF_RTYPE_CLASS_DLSYM) \ - && ELF32_ST_TYPE((SYM)->st_info) == STT_FUNC \ - ? _dl_funcdesc_for (DL_RELOC_ADDR ((SYM)->st_value, (TPNT)->loadaddr), \ - (TPNT)->loadaddr.got_value) \ - : DL_RELOC_ADDR ((SYM)->st_value, (TPNT)->loadaddr)) - -#define DL_GET_READY_TO_RUN_EXTRA_PARMS \ - , struct elf32_fdpic_loadmap *dl_boot_progmap -#define DL_GET_READY_TO_RUN_EXTRA_ARGS \ - , dl_boot_progmap - - +#include "../fdpic/dl-sysdep.h" +static __always_inline Elf32_Addr +elf_machine_load_address (void) +{ + return 0; +} -#ifdef __USE_GNU -# include <link.h> -#else -# define __USE_GNU -# include <link.h> -# undef __USE_GNU -#endif +static __always_inline void +elf_machine_relative (DL_LOADADDR_TYPE load_off, const Elf32_Addr rel_addr, + Elf32_Word relative_count) +{ + ; +} diff --git a/ldso/ldso/frv/elfinterp.c b/ldso/ldso/frv/elfinterp.c index df41f97bf..2c954b3ab 100644 --- a/ldso/ldso/frv/elfinterp.c +++ b/ldso/ldso/frv/elfinterp.c @@ -10,6 +10,7 @@ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. */ +#include <sys/cdefs.h> /* __attribute_used__ */ #include <features.h> /* Program to load an ELF binary on a linux system, and run it. 
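A few hunks up, the FR-V dl-sysdep.h shrinks to an #include of the shared ../fdpic/dl-sysdep.h plus two stubs: elf_machine_load_address() returns 0 and elf_machine_relative() does nothing, because under FDPIC each segment is relocated independently and there is no single load bias to add to R_*_RELATIVE targets. For comparison, on a conventional ET_DYN port with REL-format relocations the hook boils down to roughly the following (an illustrative sketch, not the exact code of any particular uClibc port):

#include <elf.h>

/* Walk the DT_RELCOUNT run of R_*_RELATIVE entries: each one just needs
   the single load offset added to the word at r_offset, with no symbol
   lookup involved. */
static void relative_reloc_sketch(Elf32_Addr load_off, Elf32_Addr rel_addr,
				  Elf32_Word relative_count)
{
	Elf32_Rel *rpnt = (Elf32_Rel *) rel_addr;

	for (; relative_count; relative_count--, rpnt++) {
		Elf32_Addr *reloc_addr = (Elf32_Addr *) (load_off + rpnt->r_offset);
		*reloc_addr += load_off;
	}
}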
@@ -26,47 +27,35 @@ struct funcdesc_value volatile attribute_hidden * _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; int symtab_index; char *rel_addr; - struct elf_resolve *new_tpnt; char *new_addr; struct funcdesc_value funcval; struct funcdesc_value volatile *got_entry; char *symname; + struct symbol_ref sym_ref; - rel_addr = DL_RELOC_ADDR (tpnt->dynamic_info[DT_JMPREL], - tpnt->loadaddr); + rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); - reloc_type = ELF32_R_TYPE(this_reloc->r_info); - symtab_index = ELF32_R_SYM(this_reloc->r_info); + symtab_index = ELF_R_SYM(this_reloc->r_info); - symtab = (Elf32_Sym *)(intptr_t) - DL_RELOC_ADDR (tpnt->dynamic_info[DT_SYMTAB], - tpnt->loadaddr); - strtab = DL_RELOC_ADDR (tpnt->dynamic_info[DT_STRTAB], tpnt->loadaddr); + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symname= strtab + symtab[symtab_index].st_name; - if (reloc_type != R_FRV_FUNCDESC_VALUE) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", - _dl_progname); - _dl_exit(1); - } - /* Address of GOT entry fix up */ - got_entry = (struct funcdesc_value *) - DL_RELOC_ADDR (this_reloc->r_offset, tpnt->loadaddr); + got_entry = (struct funcdesc_value *) DL_RELOC_ADDR(tpnt->loadaddr, this_reloc->r_offset); /* Get the address to be used to fill in the GOT entry. */ - new_addr = _dl_find_hash_mod(symname, tpnt->symbol_scope, NULL, 0, - &new_tpnt); + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, NULL, 0, &sym_ref); if (!new_addr) { - new_addr = _dl_find_hash_mod(symname, NULL, NULL, 0, - &new_tpnt); + new_addr = _dl_find_hash(symname, NULL, NULL, 0, &sym_ref); if (!new_addr) { _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); @@ -75,7 +64,7 @@ _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) } funcval.entry_point = new_addr; - funcval.got_value = new_tpnt->loadaddr.got_value; + funcval.got_value = sym_ref.tpnt->loadaddr.got_value; #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_bindings) @@ -99,29 +88,28 @@ _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc) (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)) + int (*reloc_fnc) (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rpnt; int symtab_index; /* Now parse the relocation information */ - rpnt = (ELF_RELOC *)(intptr_t) DL_RELOC_ADDR (rel_addr, tpnt->loadaddr); + rpnt = (ELF_RELOC *) rel_addr; rel_size = rel_size / sizeof(ELF_RELOC); - symtab = (Elf32_Sym *)(intptr_t) - DL_RELOC_ADDR (tpnt->dynamic_info[DT_SYMTAB], tpnt->loadaddr); - strtab = DL_RELOC_ADDR (tpnt->dynamic_info[DT_STRTAB], tpnt->loadaddr); + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; - for (i = 0; i < rel_size; i++, rpnt++) { + for (i = 0; i < rel_size; i++, rpnt++) { int res; - symtab_index = ELF32_R_SYM(rpnt->r_info); + symtab_index = 
ELF_R_SYM(rpnt->r_info); debug_sym(symtab,strtab,symtab_index); debug_reloc(symtab,strtab,rpnt); @@ -136,7 +124,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, if (res <0) { - int reloc_type = ELF32_R_TYPE(rpnt->r_info); + int reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) _dl_dprintf(2, "can't handle reloc type %s\n ", _dl_reltypes(reloc_type)); #else @@ -154,8 +142,8 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_reloc (struct elf_resolve *tpnt,struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; @@ -169,24 +157,24 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, #if defined (__SUPPORT_LD_DEBUG__) unsigned long old_val; #endif + struct symbol_ref sym_ref; - reloc_addr = (unsigned long *)(intptr_t) - DL_RELOC_ADDR (rpnt->r_offset, tpnt->loadaddr); + reloc_addr = (unsigned long *) DL_RELOC_ADDR (tpnt->loadaddr, rpnt->r_offset); __asm__ ("" : "=r" (reloc_addr_packed) : "0" (reloc_addr)); - reloc_type = ELF32_R_TYPE(rpnt->r_info); - symtab_index = ELF32_R_SYM(rpnt->r_info); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); symbol_addr = 0; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symname = strtab + symtab[symtab_index].st_name; - if (ELF32_ST_BIND (symtab[symtab_index].st_info) == STB_LOCAL) { - symbol_addr = (unsigned long) - DL_RELOC_ADDR (symtab[symtab_index].st_value, - tpnt->loadaddr); + if (ELF_ST_BIND (symtab[symtab_index].st_info) == STB_LOCAL) { + symbol_addr = (unsigned long) DL_RELOC_ADDR(tpnt->loadaddr, symtab[symtab_index].st_value); symbol_tpnt = tpnt; } else { symbol_addr = (unsigned long) - _dl_find_hash_mod(symname, scope, NULL, 0, &symbol_tpnt); + _dl_find_hash(symname, scope, NULL, 0, &sym_ref); /* * We want to allow undefined references to weak symbols - this might @@ -194,11 +182,12 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, * here, so all bases should be covered. */ - if (!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { + if (!symbol_addr && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { _dl_dprintf (2, "%s: can't resolve symbol '%s'\n", _dl_progname, strtab + symtab[symtab_index].st_name); _dl_exit (1); } + symbol_tpnt = sym_ref.tpnt; } #if defined (__SUPPORT_LD_DEBUG__) @@ -228,7 +217,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, symbols must be ignored, because it may hold the address of a lazy PLT entry. 
*/ - if (ELF32_ST_BIND + if (ELF_ST_BIND (symtab[symtab_index].st_info) == STB_LOCAL) funcval.entry_point += *reloc_addr; @@ -284,9 +273,9 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, static int _dl_do_lazy_reloc (struct elf_resolve *tpnt, - struct dyn_elf *scope __attribute_used__, - ELF_RELOC *rpnt, Elf32_Sym *symtab __attribute_used__, - char *strtab __attribute_used__) + struct r_scope_elem *scope __attribute__((unused)), + ELF_RELOC *rpnt, ElfW(Sym) *symtab __attribute__((unused)), + char *strtab __attribute__((unused))) { int reloc_type; struct funcdesc_value volatile *reloc_addr; @@ -296,8 +285,8 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, #endif reloc_addr = (struct funcdesc_value *)(intptr_t) - DL_RELOC_ADDR (rpnt->r_offset, tpnt->loadaddr); - reloc_type = ELF32_R_TYPE(rpnt->r_info); + DL_RELOC_ADDR (tpnt->loadaddr, rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) old_val = (unsigned long)reloc_addr->entry_point; @@ -307,9 +296,7 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, break; case R_FRV_FUNCDESC_VALUE: funcval = *reloc_addr; - funcval.entry_point = - DL_RELOC_ADDR (funcval.entry_point, - tpnt->loadaddr); + funcval.entry_point = (void *) DL_RELOC_ADDR(tpnt->loadaddr, funcval.entry_point); funcval.got_value = tpnt->loadaddr.got_value; *reloc_addr = funcval; break; @@ -318,7 +305,7 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, } #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val, reloc_addr->entry_point, reloc_addr); + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, reloc_addr->entry_point, reloc_addr); #endif return 0; @@ -333,23 +320,23 @@ _dl_parse_lazy_relocation_information int _dl_parse_relocation_information -(struct dyn_elf *rpnt, unsigned long rel_addr, unsigned long rel_size) +(struct dyn_elf *rpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } /* We don't have copy relocs. */ int _dl_parse_copy_information -(struct dyn_elf *rpnt __attribute_used__, - unsigned long rel_addr __attribute_used__, - unsigned long rel_size __attribute_used__) +(struct dyn_elf *rpnt __attribute__((unused)), + unsigned long rel_addr __attribute__((unused)), + unsigned long rel_size __attribute__((unused))) { return 0; } -#ifndef LIBDL +#ifndef IS_IN_libdl # include "../../libc/sysdeps/linux/frv/crtreloc.c" #endif diff --git a/ldso/ldso/h8300/dl-sysdep.h b/ldso/ldso/h8300/dl-sysdep.h new file mode 100644 index 000000000..880d0484e --- /dev/null +++ b/ldso/ldso/h8300/dl-sysdep.h @@ -0,0 +1 @@ +/* dl not supported */ diff --git a/ldso/ldso/i386/dl-debug.h b/ldso/ldso/i386/dl-debug.h index 72a01f8df..82baf1fcc 100644 --- a/ldso/ldso/i386/dl-debug.h +++ b/ldso/ldso/i386/dl-debug.h @@ -29,7 +29,7 @@ * SUCH DAMAGE. 
*/ -static const char *_dl_reltypes_tab[] = { +static const char * const _dl_reltypes_tab[] = { [0] "R_386_NONE", "R_386_32", "R_386_PC32", "R_386_GOT32", [4] "R_386_PLT32", "R_386_COPY", "R_386_GLOB_DAT", "R_386_JMP_SLOT", [8] "R_386_RELATIVE", "R_386_GOTOFF", "R_386_GOTPC", diff --git a/ldso/ldso/i386/dl-startup.h b/ldso/ldso/i386/dl-startup.h index 45f69b85d..125132c87 100644 --- a/ldso/ldso/i386/dl-startup.h +++ b/ldso/ldso/i386/dl-startup.h @@ -7,6 +7,7 @@ __asm__ ( " .text\n" " .globl _start\n" " .type _start,@function\n" + " .hidden _start\n" "_start:\n" " call _dl_start\n" " # Save the user entry point address in %edi.\n" @@ -35,7 +36,7 @@ __asm__ ( ); /* Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*) & ARGS)+1) @@ -44,7 +45,7 @@ static __always_inline void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, unsigned long symbol_addr, unsigned long load_addr, attribute_unused Elf32_Sym *symtab) { - switch (ELF32_R_TYPE(rpnt->r_info)) + switch (ELF_R_TYPE(rpnt->r_info)) { case R_386_32: *reloc_addr += symbol_addr; diff --git a/ldso/ldso/i386/dl-syscalls.h b/ldso/ldso/i386/dl-syscalls.h index 996bb87c6..f40c4fd31 100644 --- a/ldso/ldso/i386/dl-syscalls.h +++ b/ldso/ldso/i386/dl-syscalls.h @@ -1,6 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/i386/dl-sysdep.h b/ldso/ldso/i386/dl-sysdep.h index 7090c929d..8efa1a832 100644 --- a/ldso/ldso/i386/dl-sysdep.h +++ b/ldso/ldso/i386/dl-sysdep.h @@ -25,48 +25,42 @@ do { \ struct elf_resolve; extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry); -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 - /* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or TLS variable, so undefined references should not be allowed to define the value. ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one of the main executable's symbols, as for a COPY reloc. */ #define elf_machine_type_class(type) \ - ((((type) == R_386_JMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ + ((((type) == R_386_JMP_SLOT || (type) == R_386_TLS_DTPMOD32 \ + || (type) == R_386_TLS_DTPOFF32 || (type) == R_386_TLS_TPOFF32 \ + || (type) == R_386_TLS_TPOFF) * ELF_RTYPE_CLASS_PLT) \ | (((type) == R_386_COPY) * ELF_RTYPE_CLASS_COPY)) /* Return the link-time address of _DYNAMIC. Conveniently, this is the - first element of the GOT. This must be inlined in a function which - uses global data. */ -static __inline__ Elf32_Addr elf_machine_dynamic (void) attribute_unused; -static __inline__ Elf32_Addr + first element of the GOT, a special entry that is never relocated. */ +extern const Elf32_Addr _GLOBAL_OFFSET_TABLE_[] attribute_hidden; +static __always_inline Elf32_Addr __attribute__ ((unused, const)) elf_machine_dynamic (void) { - register Elf32_Addr *got __asm__ ("%ebx"); - return *got; + /* This produces a GOTOFF reloc that resolves to zero at link time, so in + fact just loads from the GOT register directly. 
By doing it without + an asm we can let the compiler choose any register. */ + return _GLOBAL_OFFSET_TABLE_[0]; } +extern Elf32_Dyn bygotoff[] __asm__ ("_DYNAMIC") attribute_hidden; /* Return the run-time load address of the shared object. */ -static __inline__ Elf32_Addr elf_machine_load_address (void) attribute_unused; -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr attribute_unused elf_machine_load_address (void) { - /* It doesn't matter what variable this is, the reference never makes - it to assembly. We need a dummy reference to some global variable - via the GOT to make sure the compiler initialized %ebx in time. */ - Elf32_Addr addr; - __asm__ ("leal _dl_start@GOTOFF(%%ebx), %0\n" - "subl _dl_start@GOT(%%ebx), %0" - : "=r" (addr) : "m" (_dl_errno) : "cc"); - return addr; + /* Compute the difference between the runtime address of _DYNAMIC as seen + by a GOTOFF reference, and the link-time address found in the special + unrelocated first GOT entry. */ + return (Elf32_Addr) &bygotoff - elf_machine_dynamic (); } -static __inline__ void +static __always_inline void elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { diff --git a/ldso/ldso/i386/elfinterp.c b/ldso/ldso/i386/elfinterp.c index 62e854d87..aadb00add 100644 --- a/ldso/ldso/i386/elfinterp.c +++ b/ldso/ldso/i386/elfinterp.c @@ -47,10 +47,9 @@ extern int _dl_linux_resolve(void); unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; int symtab_index; char *rel_addr; char *new_addr; @@ -60,10 +59,9 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); - reloc_type = ELF32_R_TYPE(this_reloc->r_info); - symtab_index = ELF32_R_SYM(this_reloc->r_info); + symtab_index = ELF_R_SYM(this_reloc->r_info); - symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; @@ -73,7 +71,7 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) got_addr = (char **)instr_addr; /* Get the address of the GOT entry. 
*/ - new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); if (unlikely(!new_addr)) { _dl_dprintf(2, "%s: can't resolve symbol '%s' in lib '%s'.\n", _dl_progname, symname, tpnt->libname); _dl_exit(1); @@ -100,14 +98,14 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc)(struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)) + int (*reloc_fnc)(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rpnt; int symtab_index; @@ -115,13 +113,13 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, rpnt = (ELF_RELOC *)(intptr_t)rel_addr; rel_size /= sizeof(ELF_RELOC); - symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; for (i = 0; i < rel_size; i++, rpnt++) { int res; - symtab_index = ELF32_R_SYM(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); debug_sym(symtab, strtab, symtab_index); debug_reloc(symtab, strtab, rpnt); @@ -138,7 +136,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, strtab + symtab[symtab_index].st_name); if (unlikely(res < 0)) { - int reloc_type = ELF32_R_TYPE(rpnt->r_info); + int reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) _dl_dprintf(2, "can't handle reloc type '%s' in lib '%s'\n", @@ -158,37 +156,50 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; char *symname; + struct elf_resolve *tls_tpnt = NULL; unsigned long *reloc_addr; unsigned long symbol_addr; #if defined (__SUPPORT_LD_DEBUG__) unsigned long old_val; #endif + struct symbol_ref sym_ref; reloc_addr = (unsigned long *)(intptr_t)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); - symtab_index = ELF32_R_SYM(rpnt->r_info); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); symbol_addr = 0; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symname = strtab + symtab[symtab_index].st_name; if (symtab_index) { symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt, - elf_machine_type_class(reloc_type)); + elf_machine_type_class(reloc_type), &sym_ref); /* * We want to allow undefined references to weak symbols - this * might have been intentional. We should not be linking local * symbols here, so all bases should be covered. 
*/ - if (unlikely(!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) + if (unlikely(!symbol_addr && (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS) + && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) return 1; + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } + tls_tpnt = sym_ref.tpnt; + } else { + symbol_addr = symtab[symtab_index].st_value; + tls_tpnt = tpnt; } - + #if defined (__SUPPORT_LD_DEBUG__) old_val = *reloc_addr; #endif @@ -224,13 +235,33 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, symtab[symtab_index].st_size); } break; +#if defined USE_TLS && USE_TLS + case R_386_TLS_DTPMOD32: + *reloc_addr = tls_tpnt->l_tls_modid; + break; + case R_386_TLS_DTPOFF32: + /* During relocation all TLS symbols are defined and used. + * Therefore the offset is already correct. */ + *reloc_addr = symbol_addr; + break; + case R_386_TLS_TPOFF32: + /* The offset is positive, backward from the thread pointer. */ + CHECK_STATIC_TLS((struct link_map*) tls_tpnt); + *reloc_addr += tls_tpnt->l_tls_offset - symbol_addr; + break; + case R_386_TLS_TPOFF: + /* The offset is negative, forward from the thread pointer. */ + CHECK_STATIC_TLS((struct link_map*) tls_tpnt); + *reloc_addr += symbol_addr - tls_tpnt->l_tls_offset; + break; +#endif default: return -1; } #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x", + _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif @@ -238,8 +269,8 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; unsigned long *reloc_addr; @@ -252,7 +283,7 @@ _dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, (void)strtab; reloc_addr = (unsigned long *)(intptr_t)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); + reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) old_val = *reloc_addr; @@ -270,7 +301,7 @@ _dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x", + _dl_dprintf(_dl_debug_file, "\n\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif @@ -287,8 +318,9 @@ _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, int _dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/ldso.c b/ldso/ldso/ldso.c index df7477c72..9f4e84130 100644 --- a/ldso/ldso/ldso.c +++ b/ldso/ldso/ldso.c @@ -38,38 +38,55 @@ #define ALLOW_ZERO_PLTGOT +#if defined(USE_TLS) && USE_TLS +#include "dl-tls.c" +#endif + /* Pull in the value of _dl_progname */ #include LDSO_ELFINTERP /* Global variables used within the shared library loader */ -char *_dl_library_path = 0; /* Where we look for libraries */ -char *_dl_preload = 0; /* Things to be loaded 
before the libs */ -char *_dl_ldsopath = 0; /* Location of the shared lib loader */ -int _dl_secure = 1; /* Are we dealing with setuid stuff? */ +#ifdef __LDSO_LD_LIBRARY_PATH__ +char *_dl_library_path = NULL; /* Where we look for libraries */ +#endif +#ifdef __LDSO_PRELOAD_ENV_SUPPORT__ +char *_dl_preload = NULL; /* Things to be loaded before the libs */ +#endif int _dl_errno = 0; /* We can't use the real errno in ldso */ size_t _dl_pagesize = 0; /* Store the page size for use later */ struct r_debug *_dl_debug_addr = NULL; /* Used to communicate with the gdb debugger */ void *(*_dl_malloc_function) (size_t size) = NULL; void (*_dl_free_function) (void *p) = NULL; +#ifdef __LDSO_PRELINK_SUPPORT__ +char *_dl_trace_prelink = NULL; /* Library for prelinking trace */ +struct elf_resolve *_dl_trace_prelink_map = NULL; /* Library module for prelinking trace */ +bool _dl_verbose = true; /* On by default */ +bool prelinked = false; +#endif +int _dl_secure = 1; /* Are we dealing with setuid stuff? */ + #ifdef __SUPPORT_LD_DEBUG__ -char *_dl_debug = 0; -char *_dl_debug_symbols = 0; -char *_dl_debug_move = 0; -char *_dl_debug_reloc = 0; -char *_dl_debug_detail = 0; -char *_dl_debug_nofixups = 0; -char *_dl_debug_bindings = 0; +char *_dl_debug = NULL; +char *_dl_debug_symbols = NULL; +char *_dl_debug_move = NULL; +char *_dl_debug_reloc = NULL; +char *_dl_debug_detail = NULL; +char *_dl_debug_nofixups = NULL; +char *_dl_debug_bindings = NULL; int _dl_debug_file = 2; #endif -/* Needed for standalone execution. */ +#ifdef __DSBT__ +void **_dl_ldso_dsbt = NULL; +#endif + unsigned long attribute_hidden _dl_skip_args = 0; + const char *_dl_progname = UCLIBC_LDSO; /* The name of the executable being run */ #include "dl-startup.c" +#include "dl-symbols.c" #include "dl-array.c" -/* Forward function declarations */ -static int _dl_suid_ok(void); /* * This stub function is used by some debuggers. The idea is that they @@ -87,28 +104,199 @@ void _dl_debug_state(void) } rtld_hidden_def(_dl_debug_state); -static unsigned char *_dl_malloc_addr = 0; /* Lets _dl_malloc use the already allocated memory page */ -static unsigned char *_dl_mmap_zero = 0; /* Also used by _dl_malloc */ +static unsigned char *_dl_malloc_addr = NULL; /* Lets _dl_malloc use the already allocated memory page */ +static unsigned char *_dl_mmap_zero = NULL; /* Also used by _dl_malloc */ static struct elf_resolve **init_fini_list; +static struct elf_resolve **scope_elem_list; static unsigned int nlist; /* # items in init_fini_list */ extern void _start(void); #ifdef __UCLIBC_HAS_SSP__ # include <dl-osinfo.h> -uintptr_t stack_chk_guard; +static uintptr_t stack_chk_guard; # ifndef THREAD_SET_STACK_GUARD /* Only exported for architectures that don't store the stack guard canary * in local thread area. */ uintptr_t __stack_chk_guard attribute_relro; -# ifdef __UCLIBC_HAS_SSP_COMPAT__ -strong_alias(__stack_chk_guard,__guard) -# endif -# elif __UCLIBC_HAS_SSP_COMPAT__ +# endif +# ifdef __UCLIBC_HAS_SSP_COMPAT__ uintptr_t __guard attribute_relro; # endif #endif +#ifdef __LDSO_SEARCH_INTERP_PATH__ +const char *_dl_ldsopath = NULL; /* Location of the shared lib loader */ + +static void _dl_ldsopath_init(struct elf_resolve *tpnt) +{ + char *ldsopath, *ptmp; + + /* + * Store the path where the shared lib loader was found for later use. + * Note that this logic isn't bullet proof when it comes to relative + * paths: if you use "./lib/ldso.so", and then the app does chdir() + * followed by dlopen(), the old ldso path won't get searched. 
But + * that is a fairly pathological use case, so if you don't like that, + * then set a full path to your interp and be done :P. + */ + ldsopath = _dl_strdup(tpnt->libname); + ptmp = _dl_strrchr(ldsopath, '/'); + /* + * If there is no "/", then set the path to "", and the code + * later on will take this to implicitly mean "search $PWD". + */ + if (!ptmp) + ptmp = ldsopath; + *ptmp = '\0'; + + _dl_ldsopath = ldsopath; + _dl_debug_early("Lib Loader: (%x) %s: using path: %s\n", + (unsigned) DL_LOADADDR_BASE(tpnt->loadaddr), tpnt->libname, + _dl_ldsopath); +} +#else +#define _dl_ldsopath_init(tpnt) +#endif + +char *_dl_getenv(const char *symbol, char **envp) +{ + char *pnt; + const char *pnt1; + + while ((pnt = *envp++)) { + pnt1 = symbol; + while (*pnt && *pnt == *pnt1) + pnt1++, pnt++; + if (!*pnt || *pnt != '=' || *pnt1) + continue; + return pnt + 1; + } + return 0; +} + +void _dl_unsetenv(const char *symbol, char **envp) +{ + char *pnt; + const char *pnt1; + char **newenvp = envp; + + for (pnt = *envp; pnt; pnt = *++envp) { + pnt1 = symbol; + while (*pnt && *pnt == *pnt1) + pnt1++, pnt++; + if (!*pnt || *pnt != '=' || *pnt1) + *newenvp++ = *envp; + } + *newenvp++ = *envp; + return; +} + +static int _dl_suid_ok(void) +{ + __kernel_uid_t uid, euid; + __kernel_gid_t gid, egid; + + uid = _dl_getuid(); + euid = _dl_geteuid(); + gid = _dl_getgid(); + egid = _dl_getegid(); + + if (uid == euid && gid == egid) { + return 1; + } + return 0; +} + +void *_dl_malloc(size_t size) +{ + void *retval; + +#if 0 + _dl_debug_early("request for %d bytes\n", size); +#endif + + if (_dl_malloc_function) + return (*_dl_malloc_function) (size); + + if (_dl_malloc_addr - _dl_mmap_zero + size > _dl_pagesize) { + size_t rounded_size; + + /* Since the above assumes we get a full page even if + we request less than that, make sure we request a + full page, since uClinux may give us less than + a full page. We might round even + larger-than-a-page sizes, but we end up never + reusing _dl_mmap_zero/_dl_malloc_addr in that case, + so we don't do it. + + The actual page size doesn't really matter; as long + as we're self-consistent here, we're safe. */ + if (size < _dl_pagesize) + rounded_size = (size + ADDR_ALIGN) & _dl_pagesize; + else + rounded_size = size; + + _dl_debug_early("mmapping more memory\n"); + _dl_mmap_zero = _dl_malloc_addr = _dl_mmap((void *) 0, rounded_size, + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0); + if (_dl_mmap_check_error(_dl_mmap_zero)) { + _dl_dprintf(2, "%s: mmap of a spare page failed!\n", _dl_progname); + _dl_exit(20); + } + } + retval = _dl_malloc_addr; + _dl_malloc_addr += size; + + /* + * Align memory to DL_MALLOC_ALIGN byte boundary. Some + * platforms require this, others simply get better + * performance.
+ */ + _dl_malloc_addr = (unsigned char *) (((unsigned long) _dl_malloc_addr + DL_MALLOC_ALIGN - 1) & ~(DL_MALLOC_ALIGN - 1)); + return retval; +} + +static void *_dl_zalloc(size_t size) +{ + void *p = _dl_malloc(size); + if (p) + _dl_memset(p, 0, size); + return p; +} + +void _dl_free(void *p) +{ + if (_dl_free_function) + (*_dl_free_function) (p); +} + +#if defined(USE_TLS) && USE_TLS +void *_dl_memalign(size_t __boundary, size_t __size) +{ + void *result; + int i = 0; + size_t delta; + size_t rounded = 0; + + if (_dl_memalign_function) + return (*_dl_memalign_function) (__boundary, __size); + + while (rounded < __boundary) { + rounded = (1 << i++); + } + + delta = (((size_t) _dl_malloc_addr + __size) & (rounded - 1)); + + if ((result = _dl_malloc(rounded - delta)) == NULL) + return result; + + result = _dl_malloc(__size); + + return result; +} +#endif + static void __attribute__ ((destructor)) __attribute_used__ _dl_fini(void) { unsigned int i; @@ -130,20 +318,119 @@ static void __attribute__ ((destructor)) __attribute_used__ _dl_fini(void) } } -void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, - ElfW(auxv_t) auxvt[AT_EGID + 1], char **envp, - char **argv +#ifdef __LDSO_PRELINK_SUPPORT__ +static void trace_objects(struct elf_resolve *tpnt, char *str_name) +{ + if (_dl_strcmp(_dl_trace_prelink, tpnt->libname) == 0) + _dl_trace_prelink_map = tpnt; + if (tpnt->libtype == elf_executable) { +/* Main executable */ + _dl_dprintf(1, "\t%s => %s (%x, %x)", tpnt->libname, tpnt->libname, + tpnt->mapaddr, DL_LOADADDR_BASE(tpnt->loadaddr)); + } else { +/* Preloaded, Needed or interpreter */ + _dl_dprintf(1, "\t%s => %s (%x, %x)", str_name, tpnt->libname, + tpnt->mapaddr, DL_LOADADDR_BASE(tpnt->loadaddr)); + } + +#if defined USE_TLS && USE_TLS + if ((tpnt->libtype != program_interpreter) && (tpnt->l_tls_modid)) + _dl_dprintf (1, " TLS(%x, %x)\n", tpnt->l_tls_modid, + (size_t) tpnt->l_tls_offset); + else +#endif + _dl_dprintf (1, "\n"); +} +#endif + +static struct elf_resolve * add_ldso(struct elf_resolve *tpnt, + DL_LOADADDR_TYPE load_addr, + ElfW(Addr) ldso_mapaddr, + ElfW(auxv_t) auxvt[AT_EGID + 1], + struct dyn_elf *rpnt) +{ + ElfW(Ehdr) *epnt = (ElfW(Ehdr) *) auxvt[AT_BASE].a_un.a_val; + ElfW(Phdr) *myppnt = (ElfW(Phdr) *) + DL_RELOC_ADDR(DL_GET_RUN_ADDR(load_addr, ldso_mapaddr), + epnt->e_phoff); + int j; + struct stat st; + + tpnt = _dl_add_elf_hash_table(tpnt->libname, tpnt->loadaddr, + tpnt->dynamic_info, (unsigned long)tpnt->dynamic_addr, + 0); + + tpnt->mapaddr = ldso_mapaddr; + if (_dl_stat(tpnt->libname, &st) >= 0) { + tpnt->st_dev = st.st_dev; + tpnt->st_ino = st.st_ino; + } + tpnt->n_phent = epnt->e_phnum; + tpnt->ppnt = myppnt; + for (j = 0; j < epnt->e_phnum; j++, myppnt++) { + if (myppnt->p_type == PT_GNU_RELRO) { + tpnt->relro_addr = myppnt->p_vaddr; + tpnt->relro_size = myppnt->p_memsz; + break; + } + } + tpnt->libtype = program_interpreter; + if (rpnt) { + rpnt->next = _dl_zalloc(sizeof(struct dyn_elf)); + rpnt->next->prev = rpnt; + rpnt = rpnt->next; + } else { + rpnt = _dl_zalloc(sizeof(struct dyn_elf)); + } + rpnt->dyn = tpnt; + tpnt->rtld_flags = RTLD_NOW | RTLD_GLOBAL; /* Must not be LAZY */ + + return tpnt; +} + +static ptrdiff_t _dl_build_local_scope (struct elf_resolve **list, + struct elf_resolve *map) +{ + struct elf_resolve **p = list; + struct init_fini_list *q; + + *p++ = map; + map->init_flag |= DL_RESERVED; + if (map->init_fini) + for (q = map->init_fini; q; q = q->next) + if (!
(q->tpnt->init_flag & DL_RESERVED)) + p += _dl_build_local_scope (p, q->tpnt); + return p - list; +} + +static void _dl_setup_progname(const char *argv0) +{ + char image[PATH_MAX]; + ssize_t s; + + s = _dl_readlink(AT_FDCWD, "/proc/self/exe", image, sizeof(image)); + if (s > 0 && image[0] == '/') { + image[s] = 0; + _dl_progname = _dl_strdup(image); + } else if (argv0) { + _dl_progname = argv0; + } +} + +void *_dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, + ElfW(auxv_t) auxvt[AT_EGID + 1], char **envp, char **argv DL_GET_READY_TO_RUN_EXTRA_PARMS) { - ElfW(Addr) app_mapaddr = 0; + ElfW(Addr) app_mapaddr = 0, ldso_mapaddr = 0; ElfW(Phdr) *ppnt; ElfW(Dyn) *dpnt; char *lpntstr; - unsigned int i; + unsigned int i, cnt, nscope_elem; int unlazy = 0, trace_loaded_objects = 0; struct dyn_elf *rpnt; struct elf_resolve *tcurr; struct elf_resolve *tpnt1; + struct elf_resolve *ldso_tpnt = NULL; struct elf_resolve app_tpnt_tmp; struct elf_resolve *app_tpnt = &app_tpnt_tmp; struct r_debug *debug_addr; @@ -151,13 +438,18 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, unsigned long *_dl_envp; /* The environment address */ ElfW(Addr) relro_addr = 0; size_t relro_size = 0; - struct stat st; + struct r_scope_elem *global_scope; + struct elf_resolve **local_scope; + +#if defined(USE_TLS) && USE_TLS + void *tcbp = NULL; +#endif /* Wahoo!!! We managed to make a function call! Get malloc * setup so we can use _dl_dprintf() to print debug noise * instead of the SEND_STDERR macros used in dl-startup.c */ - _dl_memset(app_tpnt, 0x00, sizeof(*app_tpnt)); + _dl_memset(app_tpnt, 0, sizeof(*app_tpnt)); /* Store the page size for later use */ _dl_pagesize = (auxvt[AT_PAGESZ].a_un.a_val) ? (size_t) auxvt[AT_PAGESZ].a_un.a_val : PAGE_SIZE; @@ -176,14 +468,19 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, * been fixed up by now. Still no function calls outside of this * library, since the dynamic resolver is not yet ready. */ - if (argv[0]) { - _dl_progname = argv[0]; - } + _dl_setup_progname(argv[0]); + +#ifdef __DSBT__ + _dl_ldso_dsbt = (void *)tpnt->dynamic_info[DT_DSBT_BASE_IDX]; + _dl_ldso_dsbt[tpnt->dynamic_info[DT_DSBT_INDEX_IDX]] = _dl_ldso_dsbt; +#endif +#ifndef __LDSO_STANDALONE_SUPPORT__ if (_start == (void *) auxvt[AT_ENTRY].a_un.a_val) { - _dl_dprintf(_dl_debug_file, "Standalone execution is not supported yet\n"); + _dl_dprintf(2, "Standalone execution is not enabled\n"); _dl_exit(1); } +#endif /* Start to build the tables of the modules that are required for * this beast to run. We start with the basic executable, and then @@ -203,8 +500,12 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, auxvt[AT_UID].a_un.a_val == auxvt[AT_EUID].a_un.a_val && auxvt[AT_GID].a_un.a_val == auxvt[AT_EGID].a_un.a_val)) { _dl_secure = 0; +#ifdef __LDSO_PRELOAD_ENV_SUPPORT__ _dl_preload = _dl_getenv("LD_PRELOAD", envp); +#endif +#ifdef __LDSO_LD_LIBRARY_PATH__ _dl_library_path = _dl_getenv("LD_LIBRARY_PATH", envp); +#endif } else { static const char unsecure_envvars[] = #ifdef EXTRA_UNSECURE_ENVVARS @@ -214,30 +515,112 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, const char *nextp; _dl_secure = 1; +#ifdef __LDSO_PRELOAD_ENV_SUPPORT__ + _dl_preload = _dl_getenv("LD_PRELOAD", envp); +#endif nextp = unsecure_envvars; do { _dl_unsetenv (nextp, envp); /* We could use rawmemchr but this need not be fast. 
*/ - nextp = (char *) _dl_strchr(nextp, '\0') + 1; + nextp = _dl_strchr(nextp, '\0') + 1; } while (*nextp != '\0'); - _dl_preload = NULL; +#ifdef __LDSO_LD_LIBRARY_PATH__ _dl_library_path = NULL; +#endif /* SUID binaries can be exploited if they do LAZY relocation. */ unlazy = RTLD_NOW; } - /* sjhill: your TLS init should go before this */ -#ifdef __UCLIBC_HAS_SSP__ - /* Set up the stack checker's canary. */ - stack_chk_guard = _dl_setup_stack_chk_guard (); -# ifdef THREAD_SET_STACK_GUARD - THREAD_SET_STACK_GUARD (stack_chk_guard); -# ifdef __UCLIBC_HAS_SSP_COMPAT__ - __guard = stack_chk_guard; -# endif -# else - __stack_chk_guard = stack_chk_guard; -# endif +#if defined(USE_TLS) && USE_TLS + _dl_error_catch_tsd = &_dl_initial_error_catch_tsd; + _dl_init_static_tls = &_dl_nothread_init_static_tls; +#endif + +#ifdef __LDSO_STANDALONE_SUPPORT__ + if (_start == (void *) auxvt[AT_ENTRY].a_un.a_val) { + ElfW(Addr) *aux_dat = (ElfW(Addr) *) argv; + int argc = (int) aux_dat[-1]; + + tpnt->libname = argv[0]; + while (argc > 1) + if (! _dl_strcmp (argv[1], "--library-path") && argc > 2) { +#ifdef __LDSO_LD_LIBRARY_PATH__ + _dl_library_path = argv[2]; +#endif + _dl_skip_args += 2; + argc -= 2; + argv += 2; + } else + break; + + /* + * If we have no further argument the program was called incorrectly. + * Grant the user some education. + */ + + if (argc < 2) { + _dl_dprintf(1, "\ +Usage: ld.so [OPTION]... EXECUTABLE-FILE [ARGS-FOR-PROGRAM...]\n\ +You have invoked `ld.so', the helper program for shared library executables.\n\ +This program usually lives in the file `/lib/ld.so', and special directives\n\ +in executable files using ELF shared libraries tell the system's program\n\ +loader to load the helper program from this file. This helper program loads\n\ +the shared libraries needed by the program executable, prepares the program\n\ +to run, and runs it. You may invoke this helper program directly from the\n\ +command line to load and run an ELF executable file; this is like executing\n\ +that file itself, but always uses this helper program from the file you\n\ +specified, instead of the helper program file specified in the executable\n\ +file you run. This is mostly of use for maintainers to test new versions\n\ +of this helper program; chances are you did not intend to run this program.\n\ +\n\ + --library-path PATH use given PATH instead of content of the environment\n\ + variable LD_LIBRARY_PATH\n"); + _dl_exit(1); + } + + ++_dl_skip_args; + ++argv; + _dl_progname = argv[0]; + + _dl_symbol_tables = rpnt = _dl_zalloc(sizeof(struct dyn_elf)); + /* + * It needs to load the _dl_progname and to map it + * Usually it is the main application launched by means of the ld.so + * but it could be also a shared object (when ld.so used for tracing) + * We keep the misleading app_tpnt name to avoid variable pollution + */ + app_tpnt = _dl_load_elf_shared_library(_dl_secure ? __RTLD_SECURE : 0, + &rpnt, _dl_progname); + if (!app_tpnt) { + _dl_dprintf(2, "can't load '%s'\n", _dl_progname); + _dl_exit(16); + } + /* + * FIXME: it needs to properly handle a PIE executable + * Usually for a main application, loadaddr is computed as difference + * between auxvt entry points and phdr, so if it is not 0, that it is a + * PIE executable. In this case instead we need to set the loadaddr to 0 + * because we are actually mapping the ELF for the main application by + * ourselves. So the PIE case must be checked. 
+ */ + + app_tpnt->rtld_flags = unlazy | RTLD_GLOBAL; + + /* + * This is used by gdb to locate the chain of shared libraries that are + * currently loaded. + */ + debug_addr = _dl_zalloc(sizeof(struct r_debug)); + ppnt = (ElfW(Phdr) *)app_tpnt->ppnt; + for (i = 0; i < app_tpnt->n_phent; i++, ppnt++) { + if (ppnt->p_type == PT_DYNAMIC) { + dpnt = (ElfW(Dyn) *) DL_RELOC_ADDR(app_tpnt->loadaddr, ppnt->p_vaddr); + _dl_parse_dynamic_info(dpnt, app_tpnt->dynamic_info, debug_addr, app_tpnt->loadaddr); + } + } + + _dl_ldsopath_init(tpnt); + } else { #endif /* At this point we are now free to examine the user application, @@ -268,8 +651,7 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, * This is used by gdb to locate the chain of shared libraries that are * currently loaded. */ - debug_addr = _dl_malloc(sizeof(struct r_debug)); - _dl_memset(debug_addr, 0, sizeof(struct r_debug)); + debug_addr = _dl_zalloc(sizeof(struct r_debug)); ppnt = (ElfW(Phdr) *) auxvt[AT_PHDR].a_un.a_val; for (i = 0; i < auxvt[AT_PHNUM].a_un.a_val; i++, ppnt++) { @@ -297,7 +679,7 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, for (i = 0; i < auxvt[AT_PHNUM].a_un.a_val; i++, ppnt++) { if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) _dl_mprotect((void *) (DL_RELOC_ADDR(app_tpnt->loadaddr, ppnt->p_vaddr) & PAGE_ALIGN), - ((ppnt->p_vaddr + app_tpnt->loadaddr) & ADDR_ALIGN) + + (DL_RELOC_ADDR(app_tpnt->loadaddr, ppnt->p_vaddr) & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz, PROT_READ | PROT_WRITE | PROT_EXEC); } @@ -305,7 +687,7 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, } #else if (app_tpnt->dynamic_info[DT_TEXTREL]) { - _dl_dprintf(_dl_debug_file, "Can't modify application's text section; use the GCC option -fPIE for position-independent executables.\n"); + _dl_dprintf(2, "Can't modify application's text section; use the GCC option -fPIE for position-independent executables.\n"); _dl_exit(1); } #endif @@ -323,13 +705,16 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, _dl_loaded_modules->libtype = elf_executable; _dl_loaded_modules->ppnt = (ElfW(Phdr) *) auxvt[AT_PHDR].a_un.a_val; _dl_loaded_modules->n_phent = auxvt[AT_PHNUM].a_un.a_val; - _dl_symbol_tables = rpnt = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf)); - _dl_memset(rpnt, 0, sizeof(struct dyn_elf)); + _dl_symbol_tables = rpnt = _dl_zalloc(sizeof(struct dyn_elf)); rpnt->dyn = _dl_loaded_modules; app_tpnt->mapaddr = app_mapaddr; app_tpnt->rtld_flags = unlazy | RTLD_GLOBAL; app_tpnt->usage_count++; - app_tpnt->symbol_scope = _dl_symbol_tables; +#ifdef __DSBT__ + _dl_ldso_dsbt[0] = app_tpnt->dsbt_table; + _dl_memcpy(app_tpnt->dsbt_table, _dl_ldso_dsbt, + app_tpnt->dsbt_size * sizeof(tpnt->dsbt_table[0])); +#endif lpnt = (unsigned long *) (app_tpnt->dynamic_info[DT_PLTGOT]); #ifdef ALLOW_ZERO_PLTGOT if (lpnt) @@ -339,24 +724,59 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, /* OK, fill this in - we did not have this before */ if (ppnt->p_type == PT_INTERP) { - char *ptmp; - tpnt->libname = (char *) DL_RELOC_ADDR(app_tpnt->loadaddr, ppnt->p_vaddr); - /* Store the path where the shared lib loader was found - * for later use - */ - _dl_ldsopath = _dl_strdup(tpnt->libname); - ptmp = _dl_strrchr(_dl_ldsopath, '/'); - if (ptmp != _dl_ldsopath) - *ptmp = '\0'; + _dl_ldsopath_init(tpnt); + } - _dl_debug_early("Lib Loader: (%x) %s\n", (unsigned) DL_LOADADDR_BASE(tpnt->loadaddr), 
tpnt->libname); + /* Discover any TLS sections if the target supports them. */ + if (ppnt->p_type == PT_TLS) { +#if defined(USE_TLS) && USE_TLS + if (ppnt->p_memsz > 0) { + app_tpnt->l_tls_blocksize = ppnt->p_memsz; + app_tpnt->l_tls_align = ppnt->p_align; + if (ppnt->p_align == 0) + app_tpnt->l_tls_firstbyte_offset = 0; + else + app_tpnt->l_tls_firstbyte_offset = + (ppnt->p_vaddr & (ppnt->p_align - 1)); + app_tpnt->l_tls_initimage_size = ppnt->p_filesz; + app_tpnt->l_tls_initimage = (void *) ppnt->p_vaddr; + + /* This image gets the ID one. */ + _dl_tls_max_dtv_idx = app_tpnt->l_tls_modid = 1; + + } + _dl_debug_early("Found TLS header for application program\n"); + break; +#else + _dl_dprintf(2, "Program uses unsupported TLS data!\n"); + _dl_exit(1); +#endif } } app_tpnt->relro_addr = relro_addr; app_tpnt->relro_size = relro_size; +#if defined(USE_TLS) && USE_TLS + /* + * Adjust the address of the TLS initialization image in + * case the executable is actually an ET_DYN object. + */ + if (app_tpnt->l_tls_initimage != NULL) { + char *tmp attribute_unused = + (char *) app_tpnt->l_tls_initimage; + app_tpnt->l_tls_initimage = + (char *) app_tpnt->l_tls_initimage + app_tpnt->loadaddr; + _dl_debug_early("Relocated TLS initial image from %x to %x (size = %x)\n", + tmp, app_tpnt->l_tls_initimage, app_tpnt->l_tls_initimage_size); + } +#endif + +#ifdef __LDSO_STANDALONE_SUPPORT__ + } /* ! ldso standalone mode */ +#endif + #ifdef __SUPPORT_LD_DEBUG__ _dl_debug = _dl_getenv("LD_DEBUG", envp); if (_dl_debug) { @@ -388,14 +808,14 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, len1 = _dl_strlen(dl_debug_output); len2 = _dl_strlen(tmp1); - filename = _dl_malloc(len1+len2+2); + filename = _dl_malloc(len1 + len2 + 2); if (filename) { _dl_strcpy (filename, dl_debug_output); filename[len1] = '.'; _dl_strcpy (&filename[len1+1], tmp1); - _dl_debug_file= _dl_open(filename, O_WRONLY|O_CREAT, 0644); + _dl_debug_file = _dl_open(filename, O_WRONLY|O_CREAT, 0644); if (_dl_debug_file < 0) { _dl_debug_file = 2; _dl_dprintf(_dl_debug_file, "can't open file: '%s'\n",filename); @@ -405,17 +825,28 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, } #endif +#ifdef __LDSO_PRELINK_SUPPORT__ +{ + char *ld_warn = _dl_getenv ("LD_WARN", envp); + + if (ld_warn && *ld_warn == '\0') + _dl_verbose = false; +} + _dl_trace_prelink = _dl_getenv("LD_TRACE_PRELINKING", envp); +#endif + if (_dl_getenv("LD_TRACE_LOADED_OBJECTS", envp) != NULL) { trace_loaded_objects++; } #ifndef __LDSO_LDD_SUPPORT__ if (trace_loaded_objects) { - _dl_dprintf(_dl_debug_file, "Use the ldd provided by uClibc\n"); + _dl_dprintf(2, "Use the ldd provided by uClibc\n"); _dl_exit(1); } #endif + ldso_mapaddr = (ElfW(Addr)) auxvt[AT_BASE].a_un.a_val; /* * OK, fix one more thing - set up debug_addr so it will point * to our chain. 
Later we may need to fill in more fields, but this @@ -423,7 +854,8 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, */ debug_addr->r_map = (struct link_map *) _dl_loaded_modules; debug_addr->r_version = 1; - debug_addr->r_ldbase = DL_LOADADDR_BASE(load_addr); + debug_addr->r_ldbase = (ElfW(Addr)) + DL_LOADADDR_BASE(DL_GET_RUN_ADDR(load_addr, ldso_mapaddr)); debug_addr->r_brk = (unsigned long) &_dl_debug_state; _dl_debug_addr = debug_addr; @@ -437,6 +869,7 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, _dl_map_cache(); +#ifdef __LDSO_PRELOAD_ENV_SUPPORT__ if (_dl_preload) { char c, *str, *str2; @@ -454,16 +887,19 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, if (!_dl_secure || _dl_strchr(str, '/') == NULL) { _dl_if_debug_dprint("\tfile='%s'; needed by '%s'\n", str, _dl_progname); - tpnt1 = _dl_load_shared_library(_dl_secure, &rpnt, NULL, str, trace_loaded_objects); + tpnt1 = _dl_load_shared_library( + _dl_secure ? __RTLD_SECURE : 0, + &rpnt, NULL, str, trace_loaded_objects); if (!tpnt1) { #ifdef __LDSO_LDD_SUPPORT__ - if (trace_loaded_objects) + if (trace_loaded_objects || _dl_trace_prelink) _dl_dprintf(1, "\t%s => not found\n", str); else #endif { - _dl_dprintf(2, "%s: can't load " "library '%s'\n", _dl_progname, str); - _dl_exit(15); + _dl_dprintf(2, "%s: library '%s' " + "from LD_PRELOAD can't be preloaded: ignored.\n", + _dl_progname, str); } } else { tpnt1->rtld_flags = unlazy | RTLD_GLOBAL; @@ -471,7 +907,7 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, _dl_debug_early("Loading: (%x) %s\n", DL_LOADADDR_BASE(tpnt1->loadaddr), tpnt1->libname); #ifdef __LDSO_LDD_SUPPORT__ - if (trace_loaded_objects && + if (trace_loaded_objects && !_dl_trace_prelink && tpnt1->usage_count == 1) { /* This is a real hack to make * ldd not print the library @@ -492,13 +928,14 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, str++; } } +#endif /* __LDSO_PRELOAD_ENV_SUPPORT__ */ #ifdef __LDSO_PRELOAD_FILE_SUPPORT__ do { - struct stat st; char *preload; int fd; char c, *cp, *cp2; + struct stat st; if (_dl_stat(LDSO_PRELOAD, &st) || st.st_size == 0) { break; @@ -547,11 +984,11 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, tpnt1 = _dl_load_shared_library(0, &rpnt, NULL, cp2, trace_loaded_objects); if (!tpnt1) { -#ifdef __LDSO_LDD_SUPPORT__ - if (trace_loaded_objects) +# ifdef __LDSO_LDD_SUPPORT__ + if (trace_loaded_objects || _dl_trace_prelink) _dl_dprintf(1, "\t%s => not found\n", cp2); else -#endif +# endif { _dl_dprintf(2, "%s: can't load library '%s'\n", _dl_progname, cp2); _dl_exit(15); @@ -561,14 +998,14 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, _dl_debug_early("Loading: (%x) %s\n", DL_LOADADDR_BASE(tpnt1->loadaddr), tpnt1->libname); -#ifdef __LDSO_LDD_SUPPORT__ - if (trace_loaded_objects && +# ifdef __LDSO_LDD_SUPPORT__ + if (trace_loaded_objects && !_dl_trace_prelink && tpnt1->usage_count == 1) { _dl_dprintf(1, "\t%s => %s (%x)\n", cp2, tpnt1->libname, DL_LOADADDR_BASE(tpnt1->loadaddr)); } -#endif +# endif } /* find start of next library */ @@ -593,14 +1030,22 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, lpntstr = (char*) (tcurr->dynamic_info[DT_STRTAB] + this_dpnt->d_un.d_val); name = _dl_get_last_path_component(lpntstr); - if (_dl_strcmp(name, UCLIBC_LDSO) == 0) - continue; - _dl_if_debug_dprint("\tfile='%s'; 
needed by '%s'\n", lpntstr, _dl_progname); - if (!(tpnt1 = _dl_load_shared_library(0, &rpnt, tcurr, lpntstr, trace_loaded_objects))) { + if (_dl_strcmp(name, UCLIBC_LDSO) == 0) { + if (!ldso_tpnt) { + /* Insert the ld.so only once */ + ldso_tpnt = add_ldso(tpnt, load_addr, + ldso_mapaddr, auxvt, rpnt); + } + ldso_tpnt->usage_count++; + tpnt1 = ldso_tpnt; + } else + tpnt1 = _dl_load_shared_library(0, &rpnt, tcurr, lpntstr, trace_loaded_objects); + + if (!tpnt1) { #ifdef __LDSO_LDD_SUPPORT__ - if (trace_loaded_objects) { + if (trace_loaded_objects || _dl_trace_prelink) { _dl_dprintf(1, "\t%s => not found\n", lpntstr); continue; } else @@ -621,7 +1066,7 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, _dl_debug_early("Loading: (%x) %s\n", DL_LOADADDR_BASE(tpnt1->loadaddr), tpnt1->libname); #ifdef __LDSO_LDD_SUPPORT__ - if (trace_loaded_objects && + if (trace_loaded_objects && !_dl_trace_prelink && tpnt1->usage_count == 1) { _dl_dprintf(1, "\t%s => %s (%x)\n", lpntstr, tpnt1->libname, @@ -633,12 +1078,18 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, } _dl_unmap_cache(); - --nlist; /* Exclude the application. */ + /* Keep track of the number of elements in the global scope */ + nscope_elem = nlist; + + if (_dl_loaded_modules->libtype == elf_executable) { + --nlist; /* Exclude the application. */ + tcurr = _dl_loaded_modules->next; + } else + tcurr = _dl_loaded_modules; init_fini_list = _dl_malloc(nlist * sizeof(struct elf_resolve *)); i = 0; - for (tcurr = _dl_loaded_modules->next; tcurr; tcurr = tcurr->next) { + for (; tcurr; tcurr = tcurr->next) init_fini_list[i++] = tcurr; - } /* Sort the INIT/FINI list in dependency order. */ for (tcurr = _dl_loaded_modules->next; tcurr; tcurr = tcurr->next) { @@ -684,43 +1135,13 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, * functions in the dynamic linker and to relocate the interpreter * again once all libs are loaded. */ - if (tpnt) { - ElfW(Ehdr) *epnt = (ElfW(Ehdr) *) auxvt[AT_BASE].a_un.a_val; - ElfW(Phdr) *myppnt = (ElfW(Phdr) *) DL_RELOC_ADDR(load_addr, epnt->e_phoff); - int j; - - tpnt = _dl_add_elf_hash_table(tpnt->libname, load_addr, - tpnt->dynamic_info, - (unsigned long)tpnt->dynamic_addr, - 0); - - if (_dl_stat(tpnt->libname, &st) >= 0) { - tpnt->st_dev = st.st_dev; - tpnt->st_ino = st.st_ino; - } - tpnt->n_phent = epnt->e_phnum; - tpnt->ppnt = myppnt; - for (j = 0; j < epnt->e_phnum; j++, myppnt++) { - if (myppnt->p_type == PT_GNU_RELRO) { - tpnt->relro_addr = myppnt->p_vaddr; - tpnt->relro_size = myppnt->p_memsz; - break; - } - } - tpnt->libtype = program_interpreter; + if (!ldso_tpnt) { + tpnt = add_ldso(tpnt, load_addr, ldso_mapaddr, auxvt, rpnt); tpnt->usage_count++; - tpnt->symbol_scope = _dl_symbol_tables; - if (rpnt) { - rpnt->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf)); - _dl_memset(rpnt->next, 0, sizeof(struct dyn_elf)); - rpnt->next->prev = rpnt; - rpnt = rpnt->next; - } else { - rpnt = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf)); - _dl_memset(rpnt, 0, sizeof(struct dyn_elf)); - } - rpnt->dyn = tpnt; - tpnt->rtld_flags = RTLD_NOW | RTLD_GLOBAL; /* Must not be LAZY */ + nscope_elem++; + } else + tpnt = ldso_tpnt; + #ifdef RERELOCATE_LDSO /* Only rerelocate functions for now. 
*/ tpnt->init_flag = RELOCS_DONE; @@ -733,18 +1154,159 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, tpnt->init_flag = RELOCS_DONE | JMP_RELOCS_DONE; #endif tpnt = NULL; + + /* + * Allocate the global scope array. + */ + scope_elem_list = (struct elf_resolve **) _dl_malloc(nscope_elem * sizeof(struct elf_resolve *)); + + for (i = 0, tcurr = _dl_loaded_modules; tcurr; tcurr = tcurr->next) + scope_elem_list[i++] = tcurr; + + _dl_loaded_modules->symbol_scope.r_list = scope_elem_list; + _dl_loaded_modules->symbol_scope.r_nlist = nscope_elem; + /* + * The symbol scope of the application, that is the first entry of the + * _dl_loaded_modules list, is just the global scope to be used for the + * symbol lookup. + */ + global_scope = &_dl_loaded_modules->symbol_scope; + + /* Build the local scope for each loaded modules. */ + local_scope = _dl_malloc(nscope_elem * sizeof(struct elf_resolve *)); + i = 1; + for (tcurr = _dl_loaded_modules->next; tcurr; tcurr = tcurr->next) { + unsigned int k; + cnt = _dl_build_local_scope(local_scope, scope_elem_list[i++]); + tcurr->symbol_scope.r_list = _dl_malloc(cnt * sizeof(struct elf_resolve *)); + tcurr->symbol_scope.r_nlist = cnt; + _dl_memcpy (tcurr->symbol_scope.r_list, local_scope, cnt * sizeof (struct elf_resolve *)); + /* Restoring the init_flag.*/ + for (k = 1; k < nscope_elem; k++) + scope_elem_list[k]->init_flag &= ~DL_RESERVED; } + _dl_free(local_scope); + #ifdef __LDSO_LDD_SUPPORT__ - /* End of the line for ldd.... */ - if (trace_loaded_objects) { - _dl_dprintf(1, "\t%s => %s (%x)\n", - rpnt->dyn->libname + _dl_strlen(_dl_ldsopath) + 1, - rpnt->dyn->libname, DL_LOADADDR_BASE(rpnt->dyn->loadaddr)); + /* Exit if LD_TRACE_LOADED_OBJECTS is on. */ + if (trace_loaded_objects && !_dl_trace_prelink) + _dl_exit(0); +#endif + +#if defined(USE_TLS) && USE_TLS + /* We do not initialize any of the TLS functionality unless any of the + * initial modules uses TLS. This makes dynamic loading of modules with + * TLS impossible, but to support it requires either eagerly doing setup + * now or lazily doing it later. Doing it now makes us incompatible with + * an old kernel that can't perform TLS_INIT_TP, even if no TLS is ever + * used. Trying to do it lazily is too hairy to try when there could be + * multiple threads (from a non-TLS-using libpthread). */ + bool was_tls_init_tp_called = tls_init_tp_called; + if (tcbp == NULL) { + _dl_debug_early("Calling init_tls()!\n"); + tcbp = init_tls (); + } +#endif +#ifdef __UCLIBC_HAS_SSP__ + _dl_debug_early("Setting up SSP guards\n"); + /* Set up the stack checker's canary. */ + stack_chk_guard = _dl_setup_stack_chk_guard (); +# ifdef THREAD_SET_STACK_GUARD + THREAD_SET_STACK_GUARD (stack_chk_guard); +# else + __stack_chk_guard = stack_chk_guard; +# endif +# ifdef __UCLIBC_HAS_SSP_COMPAT__ + __guard = stack_chk_guard; +# endif +#endif + +#ifdef __LDSO_PRELINK_SUPPORT__ + if (_dl_trace_prelink) { + + unsigned int nscope_trace = ldso_tpnt ? nscope_elem : (nscope_elem - 1); + + for (i = 0; i < nscope_trace; i++) + trace_objects(scope_elem_list[i], + _dl_get_last_path_component(scope_elem_list[i]->libname)); + + if (_dl_verbose) + /* Warn about undefined symbols. 
*/ + if (_dl_symbol_tables) + if (_dl_fixup(_dl_symbol_tables, global_scope, unlazy)) + _dl_exit(-1); _dl_exit(0); } + + if (_dl_loaded_modules->dynamic_info[DT_GNU_LIBLIST_IDX]) { + ElfW(Lib) *liblist, *liblistend; + struct elf_resolve **r_list, **r_listend, *l; + const char *strtab = (const char *)_dl_loaded_modules->dynamic_info[DT_STRTAB]; + + _dl_assert (_dl_loaded_modules->dynamic_info[DT_GNU_LIBLISTSZ_IDX] != 0); + liblist = (ElfW(Lib) *) _dl_loaded_modules->dynamic_info[DT_GNU_LIBLIST_IDX]; + liblistend = (ElfW(Lib) *) + ((char *) liblist + _dl_loaded_modules->dynamic_info[DT_GNU_LIBLISTSZ_IDX]); + r_list = _dl_loaded_modules->symbol_scope.r_list; + r_listend = r_list + nscope_elem; + + for (; r_list < r_listend && liblist < liblistend; r_list++) { + l = *r_list; + + if (l == _dl_loaded_modules) + continue; + + /* If the library is not mapped where it should, fail. */ + if (l->loadaddr) + break; + + /* Next, check if checksum matches. */ + if (l->dynamic_info[DT_CHECKSUM_IDX] == 0 || + l->dynamic_info[DT_CHECKSUM_IDX] != liblist->l_checksum) + break; + + if (l->dynamic_info[DT_GNU_PRELINKED_IDX] == 0 || + (l->dynamic_info[DT_GNU_PRELINKED_IDX] != liblist->l_time_stamp)) + break; + + if (_dl_strcmp(strtab + liblist->l_name, _dl_get_last_path_component(l->libname)) != 0) + break; + + ++liblist; + } + + + if (r_list == r_listend && liblist == liblistend) + prelinked = true; + + } + + _dl_debug_early ("prelink checking: %s\n", prelinked ? "ok" : "failed"); + + if (prelinked) { + if (_dl_loaded_modules->dynamic_info[DT_GNU_CONFLICT_IDX]) { + ELF_RELOC *conflict; + unsigned long conflict_size; + + _dl_assert (_dl_loaded_modules->dynamic_info[DT_GNU_CONFLICTSZ_IDX] != 0); + conflict = (ELF_RELOC *) _dl_loaded_modules->dynamic_info[DT_GNU_CONFLICT_IDX]; + conflict_size = _dl_loaded_modules->dynamic_info[DT_GNU_CONFLICTSZ_IDX]; + _dl_parse_relocation_information(_dl_symbol_tables, global_scope, + (unsigned long) conflict, conflict_size); + } + + /* Mark all the objects so we know they have been already relocated. */ + for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) { + tpnt->init_flag |= RELOCS_DONE; + if (tpnt->relro_size) + _dl_protect_relro (tpnt); + } + } else #endif + { + _dl_debug_early("Beginning relocation fixups\n"); #ifdef __mips__ @@ -762,13 +1324,36 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, * order so that COPY directives work correctly. */ if (_dl_symbol_tables) - if (_dl_fixup(_dl_symbol_tables, unlazy)) + if (_dl_fixup(_dl_symbol_tables, global_scope, unlazy)) _dl_exit(-1); for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) { if (tpnt->relro_size) _dl_protect_relro (tpnt); } + } /* not prelinked */ + +#if defined(USE_TLS) && USE_TLS + if (!was_tls_init_tp_called && _dl_tls_max_dtv_idx > 0) + ++_dl_tls_generation; + + _dl_debug_early("Calling _dl_allocate_tls_init()!\n"); + + /* Now that we have completed relocation, the initializer data + for the TLS blocks has its final values and we can copy them + into the main thread's TLS area, which we allocated above. */ + _dl_allocate_tls_init (tcbp); + + /* And finally install it for the main thread. If ld.so itself uses + TLS we know the thread pointer was initialized earlier. */ + if (! 
tls_init_tp_called) { + const char *lossage = (char *) TLS_INIT_TP (tcbp, USE___THREAD); + if (__builtin_expect (lossage != NULL, 0)) { + _dl_debug_early("cannot set up thread-local storage: %s\n", lossage); + _dl_exit(30); + } + } +#endif /* USE_TLS */ /* OK, at this point things are pretty much ready to run. Now we need * to touch up a few items that are required, and then we can let the @@ -777,7 +1362,7 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, * ld.so.1, so we have to look up each symbol individually. */ - _dl_envp = (unsigned long *) (intptr_t) _dl_find_hash(__C_SYMBOL_PREFIX__ "__environ", _dl_symbol_tables, NULL, 0); + _dl_envp = (unsigned long *) (intptr_t) _dl_find_hash(__C_SYMBOL_PREFIX__ "__environ", global_scope, NULL, 0, NULL); if (_dl_envp) *_dl_envp = (unsigned long) envp; @@ -833,115 +1418,34 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr, /* Find the real malloc function and make ldso functions use that from now on */ _dl_malloc_function = (void* (*)(size_t)) (intptr_t) _dl_find_hash(__C_SYMBOL_PREFIX__ "malloc", - _dl_symbol_tables, NULL, ELF_RTYPE_CLASS_PLT); + global_scope, NULL, ELF_RTYPE_CLASS_PLT, NULL); - /* Notify the debugger that all objects are now mapped in. */ - _dl_debug_addr->r_state = RT_CONSISTENT; - _dl_debug_state(); -} +#if defined(USE_TLS) && USE_TLS + /* Find the real functions and make ldso functions use them from now on */ + _dl_calloc_function = (void* (*)(size_t, size_t)) (intptr_t) + _dl_find_hash(__C_SYMBOL_PREFIX__ "calloc", global_scope, NULL, ELF_RTYPE_CLASS_PLT, NULL); -char *_dl_getenv(const char *symbol, char **envp) -{ - char *pnt; - const char *pnt1; + _dl_realloc_function = (void* (*)(void *, size_t)) (intptr_t) + _dl_find_hash(__C_SYMBOL_PREFIX__ "realloc", global_scope, NULL, ELF_RTYPE_CLASS_PLT, NULL); - while ((pnt = *envp++)) { - pnt1 = symbol; - while (*pnt && *pnt == *pnt1) - pnt1++, pnt++; - if (!*pnt || *pnt != '=' || *pnt1) - continue; - return pnt + 1; - } - return 0; -} - -void _dl_unsetenv(const char *symbol, char **envp) -{ - char *pnt; - const char *pnt1; - char **newenvp = envp; - - for (pnt = *envp; pnt; pnt = *++envp) { - pnt1 = symbol; - while (*pnt && *pnt == *pnt1) - pnt1++, pnt++; - if (!*pnt || *pnt != '=' || *pnt1) - *newenvp++ = *envp; - } - *newenvp++ = *envp; - return; -} - -static int _dl_suid_ok(void) -{ - __kernel_uid_t uid, euid; - __kernel_gid_t gid, egid; + _dl_free_function = (void (*)(void *)) (intptr_t) + _dl_find_hash(__C_SYMBOL_PREFIX__ "free", global_scope, NULL, ELF_RTYPE_CLASS_PLT, NULL); - uid = _dl_getuid(); - euid = _dl_geteuid(); - gid = _dl_getgid(); - egid = _dl_getegid(); + _dl_memalign_function = (void* (*)(size_t, size_t)) (intptr_t) + _dl_find_hash(__C_SYMBOL_PREFIX__ "memalign", global_scope, NULL, ELF_RTYPE_CLASS_PLT, NULL); - if (uid == euid && gid == egid) { - return 1; - } - return 0; -} - -void *_dl_malloc(size_t size) -{ - void *retval; - -#if 0 - _dl_debug_early("request for %d bytes\n", size); #endif - if (_dl_malloc_function) - return (*_dl_malloc_function) (size); - - if (_dl_malloc_addr - _dl_mmap_zero + size > _dl_pagesize) { - size_t rounded_size; - - /* Since the above assumes we get a full page even if - we request less than that, make sure we request a - full page, since uClinux may give us less than than - a full page. We might round even - larger-than-a-page sizes, but we end up never - reusing _dl_mmap_zero/_dl_malloc_addr in that case, - so we don't do it. 
- - The actual page size doesn't really matter; as long - as we're self-consistent here, we're safe. */ - if (size < _dl_pagesize) - rounded_size = (size + _dl_pagesize - 1) & _dl_pagesize; - else - rounded_size = size; - - _dl_debug_early("mmapping more memory\n"); - _dl_mmap_zero = _dl_malloc_addr = _dl_mmap((void *) 0, rounded_size, - PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (_dl_mmap_check_error(_dl_mmap_zero)) { - _dl_dprintf(2, "%s: mmap of a spare page failed!\n", _dl_progname); - _dl_exit(20); - } - } - retval = _dl_malloc_addr; - _dl_malloc_addr += size; - - /* - * Align memory to DL_MALLOC_ALIGN byte boundary. Some - * platforms require this, others simply get better - * performance. - */ - _dl_malloc_addr = (unsigned char *) (((unsigned long) _dl_malloc_addr + DL_MALLOC_ALIGN - 1) & ~(DL_MALLOC_ALIGN - 1)); - return retval; -} + /* Notify the debugger that all objects are now mapped in. */ + _dl_debug_addr->r_state = RT_CONSISTENT; + _dl_debug_state(); -void _dl_free (void *p) -{ - if (_dl_free_function) - (*_dl_free_function) (p); +#ifdef __LDSO_STANDALONE_SUPPORT__ + if (_start == (void *) auxvt[AT_ENTRY].a_un.a_val) + return (void *) app_tpnt->l_entry; + else +#endif + return (void *) auxvt[AT_ENTRY].a_un.a_val; } #include "dl-hash.c" diff --git a/ldso/ldso/m68k/dl-debug.h b/ldso/ldso/m68k/dl-debug.h index a9a80a067..71b513a3e 100644 --- a/ldso/ldso/m68k/dl-debug.h +++ b/ldso/ldso/m68k/dl-debug.h @@ -29,7 +29,7 @@ * SUCH DAMAGE. */ -static const char *_dl_reltypes_tab[] = { +static const char * const _dl_reltypes_tab[] = { [0] "R_68K_NONE", [1] "R_68K_32", "R_68K_16", "R_68K_8", [4] "R_68K_PC32", "R_68K_PC16", "R_68K_PC8", diff --git a/ldso/ldso/m68k/dl-startup.h b/ldso/ldso/m68k/dl-startup.h index 2ed9ead50..52a950c87 100644 --- a/ldso/ldso/m68k/dl-startup.h +++ b/ldso/ldso/m68k/dl-startup.h @@ -4,10 +4,22 @@ * Copyright (C) 2005 by Erik Andersen <andersen@codepoet.org> */ +/* Perform operation OP with PC-relative SRC as the first operand and + * DST as the second. TMP is available as a temporary if needed. */ + +#ifdef __mcoldfire__ +#define PCREL_OP(OP, SRC, DST, TMP, PC) \ + "move.l #" SRC " - ., " TMP "\n\t" OP " (-8, " PC ", " TMP "), " DST +#else +#define PCREL_OP(OP, SRC, DST, TMP, PC) \ + OP " " SRC "(" PC "), " DST +#endif + __asm__ ("\ .text\n\ .globl _start\n\ .type _start,@function\n\ + .hidden _start\n\ _start:\n\ move.l %sp, -(%sp)\n\ jbsr _dl_start\n\ @@ -21,7 +33,7 @@ _dl_start_user:\n\ move.l %d0, %a4\n\ # See if we were run as a command with the executable file\n\ # name as an extra leading argument.\n\ - move.l _dl_skip_args(%pc), %d0\n\ + " PCREL_OP ("move.l", "_dl_skip_args", "%d0", "%d0", "%pc") "\n\ # Pop the original argument count\n\ move.l (%sp)+, %d1\n\ # Subtract _dl_skip_args from it.\n\ @@ -31,7 +43,7 @@ _dl_start_user:\n\ # Push back the modified argument count.\n\ move.l %d1, -(%sp)\n\ # Pass our finalizer function to the user in %a1.\n\ - lea _dl_fini(%pc), %a1\n\ + " PCREL_OP ("lea", "_dl_fini", "%a1", "%a1", "%pc") "\n\ # Initialize %fp with the stack pointer.\n\ move.l %sp, %fp\n\ # Jump to the user's entry point.\n\ @@ -40,7 +52,7 @@ _dl_start_user:\n\ .previous"); /* Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. 
*/ #define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long *) ARGS) + 1) @@ -49,7 +61,7 @@ static __always_inline void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, unsigned long symbol_addr, unsigned long load_addr, Elf32_Sym *symtab) { - switch (ELF32_R_TYPE(rpnt->r_info)) + switch (ELF_R_TYPE(rpnt->r_info)) { case R_68K_8: *(char *) reloc_addr = symbol_addr + rpnt->r_addend; diff --git a/ldso/ldso/m68k/dl-syscalls.h b/ldso/ldso/m68k/dl-syscalls.h index 996bb87c6..f40c4fd31 100644 --- a/ldso/ldso/m68k/dl-syscalls.h +++ b/ldso/ldso/m68k/dl-syscalls.h @@ -1,6 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/m68k/dl-sysdep.h b/ldso/ldso/m68k/dl-sysdep.h index e42be3150..b5eda4e9c 100644 --- a/ldso/ldso/m68k/dl-sysdep.h +++ b/ldso/ldso/m68k/dl-sysdep.h @@ -22,14 +22,12 @@ do { \ /* Used for error messages */ #define ELF_TARGET "m68k" +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + struct elf_resolve; extern unsigned long _dl_linux_resolver (struct elf_resolve *, int); -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 - /* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so PLT entries should not be allowed to define the value. ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one @@ -41,26 +39,36 @@ extern unsigned long _dl_linux_resolver (struct elf_resolve *, int); /* Return the link-time address of _DYNAMIC. Conveniently, this is the first element of the GOT. This must be inlined in a function which uses global data. */ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_dynamic (void) { - register Elf32_Addr *got __asm__ ("%a5"); - return *got; + Elf32_Addr got; + + __asm__ ("move.l _DYNAMIC@GOT.w(%%a5), %0" + : "=a" (got)); + return got; } +#ifdef __mcoldfire__ +#define PCREL_OP(OP, SRC, DST, TMP, PC) \ + "move.l #" SRC " - ., " TMP "\n\t" OP " (-8, " PC ", " TMP "), " DST +#else +#define PCREL_OP(OP, SRC, DST, TMP, PC) \ + OP " " SRC "(" PC "), " DST +#endif /* Return the run-time load address of the shared object. 
*/ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_load_address (void) { Elf32_Addr addr; - __asm__ ("lea _dl_start(%%pc), %0\n\t" - "sub.l _dl_start@GOT.w(%%a5), %0" - : "=a" (addr)); + __asm__ (PCREL_OP ("lea", "_dl_start", "%0", "%0", "%%pc") "\n\t" + "sub.l _dl_start@GOT.w(%%a5), %0" + : "=a" (addr)); return addr; } -static __inline__ void +static __always_inline void elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { diff --git a/ldso/ldso/m68k/elfinterp.c b/ldso/ldso/m68k/elfinterp.c index 8f7364f30..fd7fe8513 100644 --- a/ldso/ldso/m68k/elfinterp.c +++ b/ldso/ldso/m68k/elfinterp.c @@ -47,7 +47,6 @@ extern int _dl_linux_resolve(void); unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; ElfW(Sym) *symtab; @@ -60,25 +59,18 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *)(rel_addr + reloc_entry); - reloc_type = ELF_R_TYPE(this_reloc->r_info); symtab_index = ELF_R_SYM(this_reloc->r_info); symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; - if (unlikely(reloc_type != R_68K_JMP_SLOT)) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", - _dl_progname); - _dl_exit(1); - } - /* Address of the jump instruction to fix up. */ instr_addr = (this_reloc->r_offset + tpnt->loadaddr); got_addr = (char **)instr_addr; /* Get the address of the GOT entry. */ - new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); if (unlikely(!new_addr)) { _dl_dprintf(2, "%s: Can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); @@ -102,9 +94,9 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc)(struct elf_resolve *tpnt, struct dyn_elf *scope, + int (*reloc_fnc)(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; @@ -159,13 +151,13 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; char *symname; - ElfW(Sym) *sym; + struct symbol_ref sym_ref; ElfW(Addr) *reloc_addr; ElfW(Addr) symbol_addr; #if defined (__SUPPORT_LD_DEBUG__) @@ -175,22 +167,27 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); reloc_type = ELF_R_TYPE(rpnt->r_info); symtab_index = ELF_R_SYM(rpnt->r_info); - sym = &symtab[symtab_index]; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symbol_addr = 0; - symname = strtab + sym->st_name; + symname = strtab + sym_ref.sym->st_name; if (symtab_index) { symbol_addr = (ElfW(Addr))_dl_find_hash(symname, scope, tpnt, - elf_machine_type_class(reloc_type)); + elf_machine_type_class(reloc_type), &sym_ref); /* * We want to allow undefined references to weak symbols - this * might have been 
intentional. We should not be linking local * symbols here, so all bases should be covered. */ - if (unlikely(!symbol_addr && ELF_ST_BIND(sym->st_info) != STB_WEAK)) { + if (unlikely(!symbol_addr && ELF_ST_BIND(sym_ref.sym->st_info) != STB_WEAK)) { _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); } + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } } #if defined (__SUPPORT_LD_DEBUG__) @@ -238,14 +235,17 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, if (_dl_debug_move) _dl_dprintf(_dl_debug_file, "\t%s move %d bytes from %x to %x\n", - symname, sym->st_size, + symname, sym_ref.sym->st_size, symbol_addr, reloc_addr); #endif _dl_memcpy ((void *) reloc_addr, (void *) symbol_addr, - sym->st_size); - } else + sym_ref.sym->st_size); + } +#if defined (__SUPPORT_LD_DEBUG__) + else _dl_dprintf(_dl_debug_file, "no symbol_addr to copy !?\n"); +#endif break; default: @@ -264,7 +264,7 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, #undef LAZY_RELOC_WORKS #ifdef LAZY_RELOC_WORKS static int -_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; @@ -313,14 +313,15 @@ _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, #ifdef LAZY_RELOC_WORKS (void)_dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc); #else - _dl_parse_relocation_information(rpnt, rel_addr, rel_size); + _dl_parse_relocation_information(rpnt, &_dl_loaded_modules->symbol_scope, rel_addr, rel_size); #endif } int _dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/m68k/resolve.S b/ldso/ldso/m68k/resolve.S index d9a2929d6..1bd5c0096 100644 --- a/ldso/ldso/m68k/resolve.S +++ b/ldso/ldso/m68k/resolve.S @@ -19,5 +19,10 @@ _dl_linux_resolve: # Pop parameters addq.l #8, %sp # Call real function. +#if defined __mcoldfire__ + move.l %d0,-(%sp) + rts +#else jmp (%d0) +#endif .size _dl_linux_resolve,.-_dl_linux_resolve diff --git a/ldso/ldso/metag/dl-debug.h b/ldso/ldso/metag/dl-debug.h new file mode 100644 index 000000000..46c257c5c --- /dev/null +++ b/ldso/ldso/metag/dl-debug.h @@ -0,0 +1,33 @@ +/* + * Meta ELF shared library loader support. + * + * Program to load an elf binary on a linux system, and run it. + * References to symbols in sharable libraries can be resolved + * by either an ELF sharable library or a linux style of shared + * library. + * + * Copyright (C) 2013, Imagination Technologies Ltd. + * + * Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball. 
+ */ + +static const char *_dl_reltypes_tab[] = { + [0] "R_METAG_HIADDR16", "R_METAG_LOADDR16", "R_METAG_ADDR32", + [3] "R_METAG_NONE", "R_METAG_RELBRANCH", "R_METAG_GETSETOFF", + [6] "R_METAG_REG32OP1", "R_METAG_REG32OP2", "R_METAG_REG32OP3", + [9] "R_METAG_REG16OP1", "R_METAG_REG16OP2", "R_METAG_REG16OP3", + [12] "R_METAG_REG32OP4", "R_METAG_HIOG", "R_METAG_LOOG", + [30] "R_METAG_GNU_VTINHERIT", "R_METAG_GNU_VTENTRY", + [32] "R_METAG_HI16_GOTOFF", "R_METAG_LO16_GOTOFF", + [34] "R_METAG_GETSET_GOTOFF", "R_METAG_GETSET_GOT", + [36] "R_METAG_HI16_GOTPC", "R_METAG_LO16_GOTPC", + [38] "R_METAG_HI16_PLT", "R_METAG_LO16_PLT", + [40] "R_METAG_RELBRANCH_PLT", "R_METAG_GOTOFF", + [42] "R_METAG_PLT", "R_METAG_COPY", "R_METAG_JMP_SLOT", + [45] "R_METAG_RELATIVE", "R_METAG_GLOB_DAT", "R_METAG_TLS_GD", + [48] "R_METAG_TLS_LDM", "R_METAG_TLS_LDO_HI16", "R_METAG_TLS_LDO_LO16", + [51] "R_METAG_TLS_LDO", "R_METAG_TLS_IE", "R_METAG_TLS_IENONPIC", + [54] "R_METAG_TLS_IENONPIC_HI16", "R_METAG_TLS_IENONPIC_LO16", + [56] "R_METAG_TLS_TPOFF", "R_METAG_TLS_DTPMOD", "R_METAG_TLS_DTPOFF", + [59] "R_METAG_TLS_LE", "R_METAG_TLS_LE_HI16", "R_METAG_TLS_LE_LO16" +}; diff --git a/ldso/ldso/metag/dl-inlines.h b/ldso/ldso/metag/dl-inlines.h new file mode 100644 index 000000000..82fba93ab --- /dev/null +++ b/ldso/ldso/metag/dl-inlines.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2013, Imagination Technologies Ltd. + * + * Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ + +static __always_inline int +__dl_is_special_segment (Elf32_Ehdr *epnt, + Elf32_Phdr *ppnt) +{ + if (ppnt->p_type != PT_LOAD && + ppnt->p_type != PT_DYNAMIC) + return 0; + + if (ppnt->p_vaddr >= 0x80000000 && + ppnt->p_vaddr < 0x82060000) + return 1; + + if (ppnt->p_vaddr >= 0xe0200000 && + ppnt->p_vaddr < 0xe0260000) + return 1; + + return 0; +} + +static __always_inline char * +__dl_map_segment (Elf32_Ehdr *epnt, + Elf32_Phdr *ppnt, + int infile, + int flags) +{ + char *addr = (char *)ppnt->p_vaddr; + + if (_DL_PREAD (infile, addr, ppnt->p_filesz, ppnt->p_offset) != ppnt->p_filesz) { + return 0; + } + + return addr; +} diff --git a/ldso/ldso/metag/dl-startup.h b/ldso/ldso/metag/dl-startup.h new file mode 100644 index 000000000..32b2e4b74 --- /dev/null +++ b/ldso/ldso/metag/dl-startup.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2013 Imagination Technologies Ltd. + * + * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ + +/* + * This code fixes the stack pointer so that the dynamic linker + * can find argc, argv and auxvt (Auxiliary Vector Table). + */ + +__asm__ ( +" .text\n" +" .global __start\n" +" .type __start,@function\n" +" .hidden __start\n" +"_start:\n" +"__start:\n" +" MSETL [A0StP++],D0Ar4,D0Ar2\n" +" MOV D1Ar1,D0Ar2\n" +" CALLR D1RtP,__dl_start\n" +" GETL D0Ar2,D1Ar1,[A0StP+#-(1*8)]\n" +" GETL D0Ar4,D1Ar3,[A0StP+#-(2*8)]\n" +" ADDT A1LbP,CPC1,#HI(__GLOBAL_OFFSET_TABLE__)\n" +" ADD A1LbP,A1LbP,#LO(__GLOBAL_OFFSET_TABLE__+4)\n" +" ADDT A1LbP,A1LbP,#HI(__dl_fini@GOTOFF)\n" +" ADD A1LbP,A1LbP,#LO(__dl_fini@GOTOFF)\n" +" MOV D0Ar4, A1LbP\n" +" SUB A0StP,A0StP,#(2*8)\n" +" MOV PC,D0Re0\n" +" .size __start,.-__start\n" +" .previous\n" +); + + +/* + * Get a pointer to the argv array. On many platforms this can be just + * the address of the first argument, on other platforms we need to + * do something a little more subtle here. + */ + +#define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long *) ARGS)) + + +/* Handle relocation of the symbols in the dynamic loader. 
*/ +static inline +void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, + unsigned long symbol_addr, unsigned long load_addr, Elf32_Sym *symtab) +{ + switch (ELF32_R_TYPE(rpnt->r_info)) { + case R_METAG_GLOB_DAT: + case R_METAG_JMP_SLOT: + case R_METAG_ADDR32: + *reloc_addr = symbol_addr; + break; + case R_METAG_RELATIVE: + *reloc_addr = load_addr + rpnt->r_addend; + break; + case R_METAG_RELBRANCH: + *reloc_addr = symbol_addr + rpnt->r_addend - *reloc_addr - 4; + break; + case R_METAG_NONE: + break; + default: + _dl_exit(1); + break; + } +} diff --git a/ldso/ldso/metag/dl-syscalls.h b/ldso/ldso/metag/dl-syscalls.h new file mode 100644 index 000000000..70ceab10e --- /dev/null +++ b/ldso/ldso/metag/dl-syscalls.h @@ -0,0 +1,6 @@ +/* stub for arch-specific syscall issues + * + * Copyright (C) 2013, Imagination Technologies Ltd. + * + * Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ diff --git a/ldso/ldso/metag/dl-sysdep.h b/ldso/ldso/metag/dl-sysdep.h new file mode 100644 index 000000000..ec17440fc --- /dev/null +++ b/ldso/ldso/metag/dl-sysdep.h @@ -0,0 +1,121 @@ +/* + * Meta can never use Elf32_Rel relocations. + * + * Copyright (C) 2013, Imagination Technologies Ltd. + * + * Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ + +#define ELF_USES_RELOCA + +#include <elf.h> + +/* Initialization sequence for the GOT. */ +#define INIT_GOT(GOT_BASE,MODULE) \ +{ \ + GOT_BASE[1] = (unsigned long) MODULE; \ + GOT_BASE[2] = (unsigned long) _dl_linux_resolve; \ +} + +/* Maximum unsigned GOT [GS]ETD offset size, ie. 2^(11+2). */ +#define GOT_REG_OFFSET 0x2000 + +/* Defined some magic numbers that this ld.so should accept. */ +#define MAGIC1 EM_METAG +#undef MAGIC2 +#define ELF_TARGET "META" + +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + +struct elf_resolve; +extern unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry); + +/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or + TLS variable, so undefined references should not be allowed to + define the value. + + ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one + of the main executable's symbols, as for a COPY reloc. 
*/ +#define elf_machine_type_class(type) \ + ((((type) == R_METAG_JMP_SLOT || (type) == R_METAG_TLS_DTPMOD \ + || (type) == R_METAG_TLS_DTPOFF || (type) == R_METAG_TLS_TPOFF) \ + * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_METAG_COPY) * ELF_RTYPE_CLASS_COPY)) + +static inline Elf32_Addr +elf_machine_dynamic(Elf32_Ehdr *header) +{ + Elf32_Addr *got; + + __asm__ ("MOV %0,A1LbP" : "=r" (got)); + + if (header->e_ident[EI_ABIVERSION] >= 1) { + /* GOT register offset was introduced with ABI v1 */ + got = (Elf32_Addr*)((void*)got - GOT_REG_OFFSET); + } + return *got; +} + +#define DL_BOOT_COMPUTE_GOT(GOT) \ + ((GOT) = elf_machine_dynamic(header)) + +static inline Elf32_Addr +elf_machine_load_address(void) +{ + Elf32_Addr addr; + __asm__ ("MOV D1Ar1,A1LbP\n" + "ADDT D1Ar1,D1Ar1,#HI(__dl_start@GOTOFF)\n" + "ADD D1Ar1,D1Ar1,#LO(__dl_start@GOTOFF)\n" + "ADDT D0Ar2,D0Ar2,#HI(__dl_start_addr@GOTOFF)\n" + "ADD D0Ar2,D0Ar2,#LO(__dl_start_addr@GOTOFF)\n" + "GETD D0Ar2,[D0Ar2]\n" + "SUB %0,D1Ar1,D0Ar2\n" + ".section .data\n" + "__dl_start_addr: .long __dl_start\n" + ".previous\n" + : "=d" (addr) : : "D1Ar1", "D0Ar2"); + return addr; +} + +static inline void +elf_machine_relative(Elf32_Addr load_off, const Elf32_Addr rel_addr, + Elf32_Word relative_count) +{ + Elf32_Rela *rpnt = (void *)rel_addr; + + --rpnt; + do { + Elf32_Addr *const reloc_addr = + (void *)(load_off + (++rpnt)->r_offset); + + *reloc_addr = load_off + rpnt->r_addend; + } while (--relative_count); +} + +#define DL_MALLOC_ALIGN 8 + +#define HAVE_DL_INLINES_H + +#define DL_IS_SPECIAL_SEGMENT(EPNT, PPNT) \ + __dl_is_special_segment(EPNT, PPNT) +#define DL_MAP_SEGMENT(EPNT, PPNT, INFILE, FLAGS) \ + __dl_map_segment (EPNT, PPNT, INFILE, FLAGS) + +#define DL_CHECK_LIB_TYPE(epnt, piclib, _dl_progname, libname) \ +do \ +{ \ + ElfW(Phdr) *ppnt_; \ + char *header_ = (char *)epnt; \ + ppnt_ = (ElfW(Phdr) *)(intptr_t) & header_[epnt->e_phoff]; \ + if (ppnt_->p_vaddr >= 0x80000000 && \ + ppnt_->p_vaddr < 0x82060000) \ + (piclib) = 2; \ + if (ppnt_->p_vaddr >= 0xe0200000 && \ + ppnt_->p_vaddr < 0xe0260000) \ + (piclib) = 2; \ +} \ +while (0) + +#define _DL_PREAD(FD, BUF, SIZE, OFFSET) \ + (_dl_pread((FD), (BUF), (SIZE), (OFFSET))) diff --git a/ldso/ldso/metag/elfinterp.c b/ldso/ldso/metag/elfinterp.c new file mode 100644 index 000000000..e0f981741 --- /dev/null +++ b/ldso/ldso/metag/elfinterp.c @@ -0,0 +1,318 @@ +/* + * Meta ELF shared library loader support. + * + * Program to load an elf binary on a linux system, and run it. + * References to symbols in sharable libraries can be resolved + * by either an ELF sharable library or a linux style of shared + * library. + * + * Copyright (C) 2013, Imagination Technologies Ltd. + * + * Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + */ + +#include "ldso.h" + +/* Defined in resolve.S. 
*/ +extern int _dl_linux_resolve(void); + +static inline unsigned long __get_unaligned_reloc(unsigned long *addr) +{ + char *rel_addr = (char *)addr; + unsigned long val; + + val = *rel_addr++ & 0xff; + val |= (*rel_addr++ << 8) & 0x0000ff00; + val |= (*rel_addr++ << 16) & 0x00ff0000; + val |= (*rel_addr++ << 24) & 0xff000000; + + return val; +} + +static inline void __put_unaligned_reloc(unsigned long *addr, + unsigned long val) +{ + char *rel_addr = (char *)addr; + + *rel_addr++ = (val & 0x000000ff); + *rel_addr++ = ((val & 0x0000ff00) >> 8); + *rel_addr++ = ((val & 0x00ff0000) >> 16); + *rel_addr++ = ((val & 0xff000000) >> 24); +} + +unsigned long +_dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) +{ + int symtab_index; + char *strtab; + char *symname; + char *new_addr; + char *rel_addr; + char **got_addr; + ElfW(Sym) *symtab; + ELF_RELOC *this_reloc; + unsigned long instr_addr; + + rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; + + this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); + symtab_index = ELF_R_SYM(this_reloc->r_info); + + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; + symname = strtab + symtab[symtab_index].st_name; + + /* Address of the jump instruction to fix up. */ + instr_addr = ((unsigned long)this_reloc->r_offset + + (unsigned long)tpnt->loadaddr); + got_addr = (char **)instr_addr; + + /* Get the address of the GOT entry. */ + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, + ELF_RTYPE_CLASS_PLT, NULL); + if (unlikely(!new_addr)) { + _dl_dprintf(2, "%s: Can't resolve symbol '%s'\n", + _dl_progname, symname); + _dl_exit(1); + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_bindings) { + _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname); + if (_dl_debug_detail) + _dl_dprintf(_dl_debug_file, + "\n\tpatched: %x ==> %x @ %x\n", + *got_addr, new_addr, got_addr); + } + if (!_dl_debug_nofixups) { + *got_addr = new_addr; + } +#else + *got_addr = new_addr; +#endif + + return (unsigned long)new_addr; +} + +static int +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, + unsigned long rel_addr, unsigned long rel_size, + int (*reloc_fnc)(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) +{ + int symtab_index; + unsigned int i; + char *strtab; + ElfW(Sym) *symtab; + ELF_RELOC *rpnt; + + /* Parse the relocation information. */ + rpnt = (ELF_RELOC *)(intptr_t)rel_addr; + rel_size /= sizeof(ELF_RELOC); + + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; + + for (i = 0; i < rel_size; i++, rpnt++) { + int res; + + symtab_index = ELF_R_SYM(rpnt->r_info); + + debug_sym(symtab, strtab, symtab_index); + debug_reloc(symtab, strtab, rpnt); + + /* Pass over to actual relocation function. 
*/ + res = reloc_fnc(tpnt, scope, rpnt, symtab, strtab); + + if (res == 0) + continue; + + _dl_dprintf(2, "\n%s: ", _dl_progname); + + if (symtab_index) + _dl_dprintf(2, "symbol '%s': ", + strtab + symtab[symtab_index].st_name); + + if (unlikely(res < 0)) { + int reloc_type = ELF_R_TYPE(rpnt->r_info); + +#if defined (__SUPPORT_LD_DEBUG__) + _dl_dprintf(2, "can't handle reloc type %s\n", + _dl_reltypes(reloc_type)); +#else + _dl_dprintf(2, "can't handle reloc type %x\n", + reloc_type); +#endif + _dl_exit(-res); + } else if (unlikely(res > 0)) { + _dl_dprintf(2, "can't resolve symbol\n"); + return res; + } + } + + return 0; +} + +static int +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) +{ + int reloc_type; + int symtab_index; + char *symname = NULL; + unsigned long *reloc_addr; + unsigned long symbol_addr; +#if defined (__SUPPORT_LD_DEBUG__) + unsigned long old_val = 0; +#endif + struct elf_resolve *tls_tpnt = NULL; + struct symbol_ref sym_ref; + + reloc_addr = (unsigned long *)(tpnt->loadaddr + rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); + symbol_addr = 0; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; + + if (symtab_index) { + symname = strtab + symtab[symtab_index].st_name; + symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt, + elf_machine_type_class(reloc_type), &sym_ref); + + if (!symbol_addr + && ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS + && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { + _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", + _dl_progname, symname); + return 1; + }; + if (_dl_trace_prelink) { + _dl_debug_lookup(symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } + tls_tpnt = sym_ref.tpnt; + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (reloc_type != R_METAG_NONE) + old_val = __get_unaligned_reloc(reloc_addr); +#endif + +#if defined USE_TLS && USE_TLS + /* In case of a TLS reloc, tls_tpnt NULL means we have an 'anonymous' + symbol. This is the case for a static tls variable, so the lookup + module is just that one is referencing the tls variable. */ + if (!tls_tpnt) + tls_tpnt = tpnt; +#endif + switch (reloc_type) { + case R_METAG_NONE: + break; + case R_METAG_GLOB_DAT: + case R_METAG_JMP_SLOT: + case R_METAG_ADDR32: + __put_unaligned_reloc(reloc_addr, + symbol_addr + rpnt->r_addend); + break; + case R_METAG_COPY: +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_move) + _dl_dprintf(_dl_debug_file, + "\t%s move %d bytes from %x to %x\n", + symname, symtab[symtab_index].st_size, + symbol_addr + rpnt->r_addend, + reloc_addr); +#endif + + _dl_memcpy((char *)reloc_addr, + (char *)symbol_addr + rpnt->r_addend, + symtab[symtab_index].st_size); + break; + case R_METAG_RELATIVE: + __put_unaligned_reloc(reloc_addr, + (unsigned long)tpnt->loadaddr + + rpnt->r_addend); + break; +#if defined USE_TLS && USE_TLS + case R_METAG_TLS_DTPMOD: + *reloc_addr = tls_tpnt->l_tls_modid; + break; + case R_METAG_TLS_DTPOFF: + *reloc_addr = symbol_addr; + break; + case R_METAG_TLS_TPOFF: + CHECK_STATIC_TLS ((struct link_map *) tls_tpnt); + *reloc_addr = tls_tpnt->l_tls_offset + symbol_addr + rpnt->r_addend; + break; +#endif + default: + return -1; /* Calls _dl_exit(1). 
*/ + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_reloc && _dl_debug_detail && reloc_type != R_METAG_NONE) { + unsigned long new_val = __get_unaligned_reloc(reloc_addr); + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", + old_val, new_val, reloc_addr); + } +#endif + + return 0; +} + +static int +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) +{ + int reloc_type; + unsigned long *reloc_addr; +#if defined (__SUPPORT_LD_DEBUG__) + unsigned long old_val; +#endif + + reloc_addr = (unsigned long *)(tpnt->loadaddr + rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + +#if defined (__SUPPORT_LD_DEBUG__) + old_val = *reloc_addr; +#endif + + switch (reloc_type) { + case R_METAG_NONE: + break; + case R_METAG_JMP_SLOT: + *reloc_addr += (unsigned long)tpnt->loadaddr; + break; + default: + return -1; /* Calls _dl_exit(1). */ + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_reloc && _dl_debug_detail) + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", + old_val, *reloc_addr, reloc_addr); +#endif + + return 0; +} + +/* External interface to the generic part of the dynamic linker. */ + +void +_dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, + unsigned long rel_addr, + unsigned long rel_size) +{ + _dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc); +} + +int +_dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, + unsigned long rel_addr, + unsigned long rel_size) +{ + return _dl_parse(rpnt->dyn, scope, rel_addr, + rel_size, _dl_do_reloc); +} diff --git a/ldso/ldso/metag/metag_load_tp.S b/ldso/ldso/metag/metag_load_tp.S new file mode 100644 index 000000000..2f00a9fef --- /dev/null +++ b/ldso/ldso/metag/metag_load_tp.S @@ -0,0 +1,20 @@ +! Copyright (C) 2013 Imagination Technologies Ltd. + +! Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + +#include <features.h> + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ + +#include <sysdep.h> + + .text + .global ___metag_load_tp + .type ___metag_load_tp,@function + +___metag_load_tp: + MOVT D1Ar1,#HI(0x6ffff000) + JUMP D1Ar1,#LO(0x6ffff000) + .size ___metag_load_tp,.-___metag_load_tp + +#endif /* __UCLIBC_HAS_THREADS_NATIVE__ */ diff --git a/ldso/ldso/metag/resolve.S b/ldso/ldso/metag/resolve.S new file mode 100644 index 000000000..8f23a340a --- /dev/null +++ b/ldso/ldso/metag/resolve.S @@ -0,0 +1,51 @@ +/* + * Meta dynamic resolver + * + * Copyright (C) 2013 Imagination Technologies Ltd. + * + * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball. + * + * This function is _not_ called directly. It is jumped to (so no return + * address is on the stack) when attempting to use a symbol that has not yet + * been resolved. The first time a jump symbol (such as a function call inside + * a shared library) is used (before it gets resolved) it will jump here to + * _dl_linux_resolve. When we get called the stack looks like this: + * reloc_entry + * tpnt + * + * This function saves all the registers then makes the function call + * _dl_linux_resolver(tpnt, reloc_entry). _dl_linux_resolver() figures out + * where the jump symbol is _really_ supposed to have jumped to and returns + * that to us. Once we have that, we overwrite tpnt with this fixed up + * address. 
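
_dl_do_lazy_reloc above only biases the PLT's GOT slots so the first call still funnels into the resolver stub; the real binding happens later in _dl_linux_resolver. A small sketch of the two steps, with illustrative names:

#include <stdint.h>

/* Load time, lazy pass: the slot holds a link-time pointer back into
 * the PLT, so adding the load bias keeps the fall-through-to-resolver
 * path intact in the relocated image. */
static void fixup_lazy_slot(uint32_t *got_slot, uint32_t load_bias)
{
	*got_slot += load_bias;
}

/* First call through the slot: the resolver looks the symbol up and
 * patches the slot so every later call jumps straight to the target. */
static void bind_slot(uint32_t *got_slot, uint32_t resolved_addr)
{
	*got_slot = resolved_addr;
}
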
We then clean up after ourselves, put all the registers back how we + * found them, then we jump to the fixed up address, which is where the jump + * symbol that got us here really wanted to jump to in the first place. + * -Erik Andersen + */ + + .text + .global __dl_linux_resolve + .type __dl_linux_resolve,@function + +__dl_linux_resolve: + !! Save registers on the stack. Do we need to save any more here? + MSETL [A0StP++],D0Ar6,D0Ar4,D0Ar2,D0FrT + SETL [A0StP++],A0FrP,A1LbP + !! Get the args for _dl_linux_resolver off the stack + GETL D0Re0,D1Re0,[A0StP+#-(6*8)] + GETD D1Ar1,[D0Re0] + MOV D0Ar2,D1Re0 + !! Multiply plt_index by sizeof(Elf32_Rela) + MULW D0Ar2,D0Ar2,#12 + !! Call the resolver + CALLR D1RtP,__dl_linux_resolver + !! Restore the registers from the stack + SUB A0.2,A0StP,#(1*8) + GETL A0FrP,A1LbP,[A0.2] + SUB A0.2,A0.2,#(4*8) + MGETL D0Ar6,D0Ar4,D0Ar2,D0FrT,[A0.2] + !! Also take into account args pushed by PLT + SUB A0StP,A0StP,#(6*8) + !! Jump to the resolved address + MOV PC,D0Re0 + .size __dl_linux_resolve, .-__dl_linux_resolve diff --git a/ldso/ldso/microblaze/dl-debug.h b/ldso/ldso/microblaze/dl-debug.h new file mode 100644 index 000000000..6fd7bd59f --- /dev/null +++ b/ldso/ldso/microblaze/dl-debug.h @@ -0,0 +1,54 @@ +/* vi: set sw=4 ts=4: */ +/* microblaze shared library loader suppport + * + * Copyright (C) 2011 Ryan Flux + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. The name of the above contributors may not be + * used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +static const char * const _dl_reltypes_tab[] = + { + "R_MICROBLAZE_NONE", + "R_MICROBLAZE_32", + "R_MICROBLAZE_32_PCREL", + "R_MICROBLAZE_64_PCREL", + "R_MICROBLAZE_32_PCREL_LO", + "R_MICROBLAZE_64", + "R_MICROBLAZE_32_LO", + "R_MICROBLAZE_SRO32", + "R_MICROBLAZE_SRW32", + "R_MICROBLAZE_64_NONE", + "R_MICROBLAZE_32_SYM_OP_SYM", + "R_MICROBLAZE_GNU_VTINHERIT", + "R_MICROBLAZE_GNU_VTENTRY", + "R_MICROBLAZE_GOTPC_64", + "R_MICROBLAZE_GOT_64", + "R_MICROBLAZE_PLT_64", + "R_MICROBLAZE_REL", + "R_MICROBLAZE_JUMP_SLOT", + "R_MICROBLAZE_GLOB_DAT", + "R_MICROBLAZE_GOTOFF_64", + "R_MICROBLAZE_GOTOFF_32", + "R_MICROBLAZE_COPY", + }; diff --git a/ldso/ldso/microblaze/dl-startup.h b/ldso/ldso/microblaze/dl-startup.h new file mode 100644 index 000000000..ba15a87c3 --- /dev/null +++ b/ldso/ldso/microblaze/dl-startup.h @@ -0,0 +1,102 @@ +/* Startup code for the microblaze platform, based on glibc 2.3.6, dl-machine.h */ + +/* + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +__asm__ ("\ + .text\n\ + .globl _start\n\ + .type _start,@function\n\ + .hidden _start\n\ +_start:\n\ + addk r5,r0,r1\n\ + addk r3,r0,r0\n\ +1:\n\ + addik r5,r5,4\n\ + lw r4,r5,r0\n\ + bneid r4,1b\n\ + addik r3,r3,1\n\ + addik r3,r3,-1\n\ + addk r5,r0,r1\n\ + sw r3,r5,r0\n\ + addik r1,r1,-24\n\ + sw r15,r1,r0\n\ + brlid r15,_dl_start\n\ + nop\n\ + /* FALLTHRU */\n\ +\n\ + .globl _dl_start_user\n\ + .type _dl_start_user,@function\n\ +_dl_start_user:\n\ + mfs r20,rpc\n\ + addik r20,r20,_GLOBAL_OFFSET_TABLE_+8\n\ + lwi r4,r20,_dl_skip_args@GOTOFF\n\ + lwi r5,r1,24\n\ + rsubk r5,r4,r5\n\ + addk r4,r4,r4\n\ + addk r4,r4,r4\n\ + addk r1,r1,r4\n\ + swi r5,r1,24\n\ + swi r3,r1,20\n\ + addk r6,r5,r0\n\ + addk r5,r5,r5\n\ + addk r5,r5,r5\n\ + addik r7,r1,28\n\ + addk r8,r7,r5\n\ + addik r8,r8,4\n\ + lwi r5,r1,24\n\ + lwi r3,r1,20\n\ + addk r4,r5,r5\n\ + addk r4,r4,r4\n\ + addik r6,r1,28\n\ + addk r7,r6,r4\n\ + addik r7,r7,4\n\ + addik r15,r20,_dl_fini@GOTOFF\n\ + addik r15,r15,-8\n\ + brad r3\n\ + addik r1,r1,24\n\ + nop\n\ + .size _dl_start_user, . - _dl_start_user\n\ + .previous"); + +/* + * Get a pointer to the argv array. On many platforms this can be just + * the address of the first argument, on other platforms we need to + * do something a little more subtle here. 
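
The _dl_start_user stub above also has to drop the argument words the loader itself consumed when it was invoked explicitly (ld.so ./app ...). A rough C-level sketch of that argv fix-up, under the usual assumption that the stack pointer addresses argc followed by the argv words:

/* Sketch only: skip the first 'skip' argv entries and rewrite argc in
 * place, returning the adjusted stack pointer (argc at [0], the
 * application's argv[0] at [1]). */
static long *skip_ldso_args(long *sp, long skip)
{
	long argc = sp[0];
	long *new_sp = sp + skip;

	new_sp[0] = argc - skip;
	return new_sp;
}
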
+ */ +#define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*) ARGS)+1) + +/* The ld.so library requires relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + +static __always_inline +void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, + unsigned long symbol_addr, unsigned long load_addr, attribute_unused Elf32_Sym *symtab) +{ + + switch (ELF_R_TYPE(rpnt->r_info)) + { + case R_MICROBLAZE_REL: + + *reloc_addr = load_addr + rpnt->r_addend; + break; + + default: + _dl_exit(1); + break; + + } + +} diff --git a/ldso/ldso/microblaze/dl-syscalls.h b/ldso/ldso/microblaze/dl-syscalls.h new file mode 100644 index 000000000..f40c4fd31 --- /dev/null +++ b/ldso/ldso/microblaze/dl-syscalls.h @@ -0,0 +1 @@ +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/microblaze/dl-sysdep.h b/ldso/ldso/microblaze/dl-sysdep.h new file mode 100644 index 000000000..2b5521887 --- /dev/null +++ b/ldso/ldso/microblaze/dl-sysdep.h @@ -0,0 +1,84 @@ +/* elf reloc code for the microblaze platform, based on glibc 2.3.6, dl-machine.h */ + +/* + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +/* Use reloca */ +#define ELF_USES_RELOCA + +#include <elf.h> + + +/* Initialise the GOT */ +#define INIT_GOT(GOT_BASE,MODULE) \ +do { \ + GOT_BASE[2] = (unsigned long) _dl_linux_resolve; \ + GOT_BASE[1] = (unsigned long) MODULE; \ +} while(0) + +/* Here we define the magic numbers that this dynamic loader should accept */ + +#define MAGIC1 EM_MICROBLAZE_OLD +#undef MAGIC2 +/* Used for error messages */ +#define ELF_TARGET "microblaze" + +#define elf_machine_type_class(type) \ + (((type) == R_MICROBLAZE_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT \ + | ((type) == R_MICROBLAZE_COPY) * ELF_RTYPE_CLASS_COPY) + +/* Return the link-time address of _DYNAMIC. Conveniently, this is the + first element of the GOT. This must be inlined in a function which + uses global data. */ +static inline Elf32_Addr +elf_machine_dynamic (void) +{ + Elf32_Addr got_entry_0; + __asm__ __volatile__( + "lwi %0,r20,0" + :"=r"(got_entry_0) + ); + return got_entry_0; +} + + +/* Return the run-time load address of the shared object. */ +static inline Elf32_Addr +elf_machine_load_address (void) +{ + /* Compute the difference between the runtime address of _DYNAMIC as seen + by a GOTOFF reference, and the link-time address found in the special + unrelocated first GOT entry. 
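
elf_machine_dynamic and elf_machine_load_address in this header cooperate to find the load bias before any relocation has been applied: GOT[0] still holds the link-time address of _DYNAMIC, while a GOT-relative reference yields its run-time address. A sketch of the arithmetic (illustrative helper, not part of the header):

/* Everything the early self-relocation needs is this difference; each
 * relative relocation then adds it to a link-time address. */
static unsigned long load_bias(unsigned long dynamic_runtime,
			       unsigned long dynamic_linktime /* GOT[0] */)
{
	return dynamic_runtime - dynamic_linktime;
}
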
*/ + Elf32_Addr dyn; + __asm__ __volatile__ ( + "addik %0,r20,_DYNAMIC@GOTOFF" + : "=r"(dyn) + ); + return dyn - elf_machine_dynamic (); +} + + + +static __always_inline void +elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, + Elf32_Word relative_count) +{ + Elf32_Rel * rpnt = (void *) rel_addr; + do { + Elf32_Addr *const reloc_addr = (void *) (load_off + (rpnt)->r_offset); + + *reloc_addr += load_off; + } while (--relative_count); +} diff --git a/ldso/ldso/microblaze/elfinterp.c b/ldso/ldso/microblaze/elfinterp.c new file mode 100644 index 000000000..1f6aeffb7 --- /dev/null +++ b/ldso/ldso/microblaze/elfinterp.c @@ -0,0 +1,330 @@ +/* vi: set sw=4 ts=4: */ +/* microblaze ELF shared library loader suppport + * + * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald, + * David Engel, Hongjiu Lu and Mitch D'Souza + * Copyright (C) 2001-2004 Erik Andersen + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. The name of the above contributors may not be + * used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "ldso.h" + +/* Program to load an ELF binary on a linux system, and run it. + References to symbols in sharable libraries can be resolved by either + an ELF sharable library or a linux style of shared library. */ + +/* Disclaimer: I have never seen any AT&T source code for SVr4, nor have + I ever taken any courses on internals. This program was developed using + information available through the book "UNIX SYSTEM V RELEASE 4, + Programmers guide: Ansi C and Programming Support Tools", which did + a more than adequate job of explaining everything required to get this + working. */ + +extern int _dl_linux_resolve(void); + +unsigned long +_dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) +{ + ELF_RELOC *this_reloc; + char *strtab; + ElfW(Sym) *symtab; + int symtab_index; + char *rel_addr; + char *new_addr; + char **got_addr; + ElfW(Addr) instr_addr; + char *symname; + + rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; + this_reloc = (ELF_RELOC *)(rel_addr + reloc_entry); + symtab_index = ELF_R_SYM(this_reloc->r_info); + + symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; + symname = strtab + symtab[symtab_index].st_name; + + /* Address of the jump instruction to fix up. */ + instr_addr = (this_reloc->r_offset + tpnt->loadaddr); + got_addr = (char **)instr_addr; + + /* Get the address of the GOT entry. 
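
elf_machine_relative above walks REL-style entries and adjusts each target word in place. For contrast, a RELA-flavoured relative pass writes the word outright from the addend stored in the entry; a hedged sketch, assuming Elf32_Rela entries:

#include <elf.h>

static void apply_relative_rela(Elf32_Addr load_off, const Elf32_Rela *r,
				Elf32_Word count)
{
	for (; count != 0; --count, ++r) {
		Elf32_Addr *where = (Elf32_Addr *)(load_off + r->r_offset);

		*where = load_off + r->r_addend;	/* B + A */
	}
}
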
*/ + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); + if (unlikely(!new_addr)) { + _dl_dprintf(2, "%s: Can't resolve symbol '%s'\n", _dl_progname, symname); + _dl_exit(1); + } + +#if defined (__SUPPORT_LD_DEBUG__) + if ((unsigned long)got_addr < 0x40000000) { + if (_dl_debug_bindings) { + _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname); + if (_dl_debug_detail) + _dl_dprintf(_dl_debug_file, + "\tpatched: %x ==> %x @ %x\n", + *got_addr, new_addr, got_addr); + } + } + if (!_dl_debug_nofixups) +#endif + *got_addr = new_addr; + + return (unsigned long)new_addr; +} + +static int +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, + unsigned long rel_addr, unsigned long rel_size, + int (*reloc_fnc)(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) +{ + unsigned int i; + char *strtab; + ElfW(Sym) *symtab; + ELF_RELOC *rpnt; + int symtab_index; + + /* Parse the relocation information. */ + rpnt = (ELF_RELOC *)rel_addr; + rel_size /= sizeof(ELF_RELOC); + + symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; + + for (i = 0; i < rel_size; i++, rpnt++) { + int res; + + symtab_index = ELF_R_SYM(rpnt->r_info); + + debug_sym(symtab, strtab, symtab_index); + debug_reloc(symtab, strtab, rpnt); + + res = reloc_fnc(tpnt, scope, rpnt, symtab, strtab); + + if (res == 0) + continue; + + _dl_dprintf(2, "\n%s: ", _dl_progname); + + if (symtab_index) + _dl_dprintf(2, "symbol '%s': ", + strtab + symtab[symtab_index].st_name); + + if (unlikely(res < 0)) { + int reloc_type = ELF_R_TYPE(rpnt->r_info); + + _dl_dprintf(2, "can't handle reloc type " +#if defined (__SUPPORT_LD_DEBUG__) + "%s\n", _dl_reltypes(reloc_type)); +#else + "%x\n", reloc_type); +#endif + _dl_exit(-res); + } else if (unlikely(res > 0)) { + _dl_dprintf(2, "can't resolve symbol\n"); + return res; + } + } + + return 0; +} + +static int +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) +{ + int reloc_type; + int symtab_index; + char *symname; +#if defined USE_TLS && USE_TLS + struct elf_resolve *tls_tpnt; +#endif + struct symbol_ref sym_ref; + ElfW(Addr) *reloc_addr; + ElfW(Addr) symbol_addr; +#if defined (__SUPPORT_LD_DEBUG__) + ElfW(Addr) old_val; +#endif + + reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; + symbol_addr = 0; + symname = strtab + sym_ref.sym->st_name; + + if (symtab_index) { + symbol_addr = (ElfW(Addr))_dl_find_hash(symname, scope, tpnt, + elf_machine_type_class(reloc_type), &sym_ref); + /* + * We want to allow undefined references to weak symbols - this + * might have been intentional. We should not be linking local + * symbols here, so all bases should be covered. + */ + if (unlikely(!symbol_addr && (ELF_ST_TYPE(sym_ref.sym->st_info) != STT_TLS) + && (ELF_ST_BIND(sym_ref.sym->st_info) != STB_WEAK))) { + /* This may be non-fatal if called from dlopen. */ + return 1; + } +#if defined USE_TLS && USE_TLS + tls_tpnt = sym_ref.tpnt; +#endif + } else { + /* Relocs against STN_UNDEF are usually treated as using a + * symbol value of zero, and using the module containing the + * reloc itself. 
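
The error handling above encodes a standard policy: a lookup that comes back empty is only fatal when the reference is neither weak nor TLS. The same policy as a stand-alone predicate (names are illustrative):

#include <elf.h>

static int lookup_failure_is_fatal(const Elf32_Sym *sym, Elf32_Addr addr)
{
	if (addr != 0)
		return 0;	/* resolved normally */
	if (ELF32_ST_BIND(sym->st_info) == STB_WEAK)
		return 0;	/* undefined weak legitimately resolves to 0 */
	if (ELF32_ST_TYPE(sym->st_info) == STT_TLS)
		return 0;	/* TLS offsets are computed separately */
	return 1;		/* genuine unresolved reference */
}
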
*/ + symbol_addr = sym_ref.sym->st_value; +#if defined USE_TLS && USE_TLS + tls_tpnt = tpnt; +#endif + } + + +#if defined (__SUPPORT_LD_DEBUG__) + if (reloc_addr) { + old_val = *reloc_addr; + } else { + old_val = 0; + } +#endif + + switch (reloc_type) { + case R_MICROBLAZE_NONE: + case R_MICROBLAZE_64_NONE: + break; + + case R_MICROBLAZE_64: + *reloc_addr = symbol_addr + rpnt->r_addend; + break; + + case R_MICROBLAZE_32: + case R_MICROBLAZE_32_LO: + *reloc_addr = symbol_addr + rpnt->r_addend; + break; + + case R_MICROBLAZE_32_PCREL: + case R_MICROBLAZE_32_PCREL_LO: + case R_MICROBLAZE_64_PCREL: + case R_MICROBLAZE_SRO32: + case R_MICROBLAZE_SRW32: + *reloc_addr = symbol_addr + rpnt->r_addend; + break; + + case R_MICROBLAZE_GLOB_DAT: + case R_MICROBLAZE_JUMP_SLOT: + *reloc_addr = symbol_addr + rpnt->r_addend; + break; +/* Handled by elf_machine_relative */ + case R_MICROBLAZE_REL: + *reloc_addr = (unsigned long)tpnt->loadaddr + rpnt->r_addend; + break; + + case R_MICROBLAZE_COPY: + if (symbol_addr) { +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_move) + _dl_dprintf(_dl_debug_file, + "\t%s move %d bytes from %x to %x\n", + symname, sym_ref.sym->st_size, + symbol_addr, reloc_addr); +#endif + + _dl_memcpy((char *)reloc_addr, + (char *)symbol_addr, + sym_ref.sym->st_size); + } +#if defined (__SUPPORT_LD_DEBUG__) + else + _dl_dprintf(_dl_debug_file, "no symbol_addr to copy !?\n"); +#endif + break; + + default: + return -1; /* Calls _dl_exit(1). */ + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_reloc && _dl_debug_detail) + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", + old_val, *reloc_addr, reloc_addr); +#endif + + return 0; +} + +static int +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) +{ + int reloc_type; + int symtab_index; + ElfW(Addr) *reloc_addr; +#if defined (__SUPPORT_LD_DEBUG__) + ElfW(Addr) old_val; +#endif + + (void)scope; + symtab_index = ELF_R_SYM(rpnt->r_info); + (void)strtab; + + reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + +#if defined (__SUPPORT_LD_DEBUG__) + old_val = *reloc_addr; +#endif + + switch (reloc_type) { + case R_MICROBLAZE_NONE: + break; + case R_MICROBLAZE_JUMP_SLOT: + *reloc_addr += (unsigned long)tpnt->loadaddr; + break; + default: + _dl_exit(1); + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_reloc && _dl_debug_detail) + _dl_dprintf(_dl_debug_file, "\tpatched_lazy: %x ==> %x @ %x\n", + old_val, *reloc_addr, reloc_addr); +#endif + + return 0; +} + +void +_dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, + unsigned long rel_addr, unsigned long rel_size) +{ + (void)_dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc); +} + +int +_dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) +{ + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); +} diff --git a/ldso/ldso/microblaze/resolve.S b/ldso/ldso/microblaze/resolve.S new file mode 100644 index 000000000..67164d27c --- /dev/null +++ b/ldso/ldso/microblaze/resolve.S @@ -0,0 +1,51 @@ + +/* This code is used in dl-runtime.c to call the `fixup' function + and then redirect to the address it returns. */ +/* We assume that R3 contain relocation offset and R4 contains + link_map (_DYNAMIC). This must be consistent with the JUMP_SLOT + layout generated by binutils. 
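
The R_MICROBLAZE_COPY case above is the usual COPY-relocation dance: the executable reserves space for a data object defined in a shared library, and at startup the loader copies the library's initial value into that space so all references bind to the executable's copy. Reduced to its essence (sketch):

#include <string.h>

static void apply_copy_reloc(void *exe_copy, const void *lib_definition,
			     size_t size)
{
	memcpy(exe_copy, lib_definition, size);
}
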
*/ + +/* Based on glibc 2.3.6, dl-machine.h */ +/* + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +.text +.align 4 +.globl _dl_linux_resolver +.globl _dl_linux_resolve +.type _dl_linux_resolve,@function + +_dl_linux_resolve: + addik r1,r1,-40 + swi r5,r1,12 + swi r6,r1,16 + swi r7,r1,20 + swi r8,r1,24 + swi r9,r1,28 + swi r10,r1,32 + swi r15,r1,0 + addk r5,r0,r4 + brlid r15, _dl_linux_resolver + addk r6,r0,r3; /* delay slot */ + lwi r10,r1,32 + lwi r9,r1,28 + lwi r8,r1,24 + lwi r7,r1,20 + lwi r6,r1,16 + lwi r5,r1,12 + lwi r15,r1,0 + brad r3 + addik r1,r1,40; /* delay slot */ + .size _dl_linux_resolve, . - _dl_linux_resolve diff --git a/ldso/ldso/mips/README b/ldso/ldso/mips/README index 9ca6a869b..c47109d3d 100644 --- a/ldso/ldso/mips/README +++ b/ldso/ldso/mips/README @@ -13,7 +13,7 @@ The code is taken from the function 'RTLD_START' in the file elfinterp.c ----------- Contains the runtime resolver code taken from the function -'__dl_runtime_resolve' in 'sysdeps/mips/dl-machine.h'. Also +'__dl_runtime_resolve' in 'sysdeps/mips/dl-trampoline.h'. Also contains the function to perform relocations for objects other than the linker itself. The code was taken from the function 'elf_machine_rel' in 'sysdeps/mips/dl-machine.h'. @@ -47,6 +47,6 @@ resolve.S --------- Contains the low-level assembly code for the dynamic runtime resolver. The code is taken from the assembly code function -'_dl_runtime_resolve' in the file 'sysdeps/mips/dl-machine.h'. +'_dl_runtime_resolve' in the file 'sysdeps/mips/dl-trampoline.h'. The code looks a bit different since we only need to pass the symbol index and the old GP register. diff --git a/ldso/ldso/mips/dl-debug.h b/ldso/ldso/mips/dl-debug.h index 07a2addfa..e71aaf739 100644 --- a/ldso/ldso/mips/dl-debug.h +++ b/ldso/ldso/mips/dl-debug.h @@ -27,7 +27,7 @@ * SUCH DAMAGE. 
*/ -static const char *_dl_reltypes_tab[] = +static const char * const _dl_reltypes_tab[] = { [0] "R_MIPS_NONE", "R_MIPS_16", "R_MIPS_32", [3] "R_MIPS_REL32", "R_MIPS_26", "R_MIPS_HI16", @@ -40,6 +40,15 @@ static const char *_dl_reltypes_tab[] = [25] "R_MIPS_INSERT_A", "R_MIPS_INSERT_B", "R_MIPS_DELETE", [28] "R_MIPS_HIGHER", "R_MIPS_HIGHEST", "R_MIPS_CALL_HI16", [31] "R_MIPS_CALL_LO16", "R_MIPS_SCN_DISP", "R_MIPS_REL16", - [34] "R_MIPS_ADD_IMMEDIATE", "R_MIPS_PJUMP", "R_MIPS_RELGOT", - [37] "R_MIPS_JALR", + [34] "R_MIPS_ADD_IMMEDIATE", "R_MIPS_PJUMP", "R_MIPS_RELGOT", + [37] "R_MIPS_JALR", "R_MIPS_TLS_DTPMOD32", "R_MIPS_TLS_DTPREL32", + [40] "R_MIPS_TLS_DTPMOD64", "R_MIPS_TLS_DTPREL64", "R_MIPS_TLS_GD", + [43] "R_MIPS_TLS_LDM", "R_MIPS_TLS_DTPREL_HI16", + [45] "R_MIPS_TLS_DTPREL_LO16", + [46] "R_MIPS_TLS_GOTTPREL", "R_MIPS_TLS_TPREL32", "R_MIPS_TLS_TPREL64", + [49] "R_MIPS_TLS_TPREL_HI16", + [50] "R_MIPS_TLS_TPREL_LO16", + [51] "R_MIPS_GLOB_DAT", + [126] "R_MIPS_COPY", "R_MIPS_JUMP_SLOT", + }; diff --git a/ldso/ldso/mips/dl-startup.h b/ldso/ldso/mips/dl-startup.h index 606b162a3..0cab7be32 100644 --- a/ldso/ldso/mips/dl-startup.h +++ b/ldso/ldso/mips/dl-startup.h @@ -12,6 +12,7 @@ __asm__("" " .globl _start\n" " .ent _start\n" " .type _start,@function\n" + " .hidden _start\n" "_start:\n" " .set noreorder\n" " move $25, $31\n" @@ -36,6 +37,7 @@ __asm__("" #if _MIPS_SIM == _MIPS_SIM_ABI32 " subu $29, 16\n" #endif +# if !defined __mips_isa_rev || __mips_isa_rev < 6 #if _MIPS_SIM == _MIPS_SIM_ABI64 " dla $8, .coff\n" #else /* O32 || N32 */ @@ -43,6 +45,10 @@ __asm__("" #endif /* O32 || N32 */ " bltzal $8, .coff\n" ".coff:\n" +# else + ".coff:\n" + " lapc $31, .coff\n" +# endif #if _MIPS_SIM == _MIPS_SIM_ABI64 " dsubu $8, $31, $8\n" " dla $25, _dl_start\n" @@ -111,7 +117,7 @@ __asm__("" /* * Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long *) ARGS)+1) diff --git a/ldso/ldso/mips/dl-syscalls.h b/ldso/ldso/mips/dl-syscalls.h index 996bb87c6..f40c4fd31 100644 --- a/ldso/ldso/mips/dl-syscalls.h +++ b/ldso/ldso/mips/dl-syscalls.h @@ -1,6 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. 
*/ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/mips/dl-sysdep.h b/ldso/ldso/mips/dl-sysdep.h index 312b9e858..0122199e4 100644 --- a/ldso/ldso/mips/dl-sysdep.h +++ b/ldso/ldso/mips/dl-sysdep.h @@ -93,10 +93,11 @@ typedef struct #include <link.h> -#define ARCH_NUM 3 +#define ARCH_NUM 4 #define DT_MIPS_GOTSYM_IDX (DT_NUM + OS_NUM) #define DT_MIPS_LOCAL_GOTNO_IDX (DT_NUM + OS_NUM +1) #define DT_MIPS_SYMTABNO_IDX (DT_NUM + OS_NUM +2) +#define DT_MIPS_PLTGOT_IDX (DT_NUM + OS_NUM +3) #define ARCH_DYNAMIC_INFO(dpnt, dynamic, debug_addr) \ do { \ @@ -106,14 +107,20 @@ else if (dpnt->d_tag == DT_MIPS_LOCAL_GOTNO) \ dynamic[DT_MIPS_LOCAL_GOTNO_IDX] = dpnt->d_un.d_val; \ else if (dpnt->d_tag == DT_MIPS_SYMTABNO) \ dynamic[DT_MIPS_SYMTABNO_IDX] = dpnt->d_un.d_val; \ -else if (dpnt->d_tag == DT_MIPS_RLD_MAP) \ +else if (dpnt->d_tag == DT_MIPS_PLTGOT) \ + dynamic[DT_MIPS_PLTGOT_IDX] = dpnt->d_un.d_val; \ +else if ((dpnt->d_tag == DT_MIPS_RLD_MAP) && (dpnt->d_un.d_ptr)) \ *(ElfW(Addr) *)(dpnt->d_un.d_ptr) = (ElfW(Addr)) debug_addr; \ } while (0) +#define ARCH_SKIP_RELOC(type_class, sym) \ + ((sym)->st_shndx == SHN_UNDEF && !((sym)->st_other & STO_MIPS_PLT)) + /* Initialization sequence for the application/library GOT. */ #define INIT_GOT(GOT_BASE,MODULE) \ do { \ unsigned long idx; \ + unsigned long *pltgot; \ \ /* Check if this is the dynamic linker itself */ \ if (MODULE->libtype == program_interpreter) \ @@ -123,6 +130,12 @@ do { \ GOT_BASE[0] = (unsigned long) _dl_runtime_resolve; \ GOT_BASE[1] = (unsigned long) MODULE; \ \ + pltgot = (unsigned long *) MODULE->dynamic_info[DT_MIPS_PLTGOT_IDX]; \ + if (pltgot) { \ + pltgot[0] = (unsigned long) _dl_runtime_pltresolve; \ + pltgot[1] = (unsigned long) MODULE; \ + } \ + \ /* Add load address displacement to all local GOT entries */ \ idx = 2; \ while (idx < MODULE->dynamic_info[DT_MIPS_LOCAL_GOTNO_IDX]) \ @@ -139,31 +152,41 @@ do { \ /* Used for error messages */ #define ELF_TARGET "MIPS" +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS unsigned long __dl_runtime_resolve(unsigned long sym_index, unsigned long old_gpreg); struct elf_resolve; +unsigned long __dl_runtime_pltresolve(struct elf_resolve *tpnt, + int reloc_entry); + void _dl_perform_mips_global_got_relocations(struct elf_resolve *tpnt, int lazy); -/* 4096 bytes alignment */ -#if _MIPS_SIM == _MIPS_SIM_ABI64 -#define PAGE_ALIGN (~0xfffUL) -#define ADDR_ALIGN 0xfffUL -#define OFFS_ALIGN (0x10000000000UL-0x1000) -#else /* O32 || N32 */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 -#endif /* O32 || N32 */ - -#define elf_machine_type_class(type) ELF_RTYPE_CLASS_PLT -/* MIPS does not have COPY relocs */ -#define DL_NO_COPY_RELOCS +#if defined USE_TLS +# if _MIPS_SIM == _MIPS_SIM_ABI64 +# define elf_machine_type_class(type) \ + ((((type) == R_MIPS_JUMP_SLOT || (type) == R_MIPS_TLS_DTPMOD64 \ + || (type) == R_MIPS_TLS_DTPREL64 || (type) == R_MIPS_TLS_TPREL64) \ + * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_MIPS_COPY) * ELF_RTYPE_CLASS_COPY)) +# else +# define elf_machine_type_class(type) \ + ((((type) == R_MIPS_JUMP_SLOT || (type) == R_MIPS_TLS_DTPMOD32 \ + || (type) == R_MIPS_TLS_DTPREL32 || (type) == R_MIPS_TLS_TPREL32) \ + * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_MIPS_COPY) * ELF_RTYPE_CLASS_COPY)) +# endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ +#else +#define elf_machine_type_class(type) \ + ((((type) 
== R_MIPS_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_MIPS_COPY) * ELF_RTYPE_CLASS_COPY)) +#endif /* USE_TLS */ #define OFFSET_GP_GOT 0x7ff0 -static __inline__ ElfW(Addr) * +static __always_inline ElfW(Addr) * elf_mips_got_from_gpreg (ElfW(Addr) gpreg) { /* FIXME: the offset of gp from GOT may be system-dependent. */ @@ -173,7 +196,7 @@ elf_mips_got_from_gpreg (ElfW(Addr) gpreg) /* Return the link-time address of _DYNAMIC. Conveniently, this is the first element of the GOT. This must be inlined in a function which uses global data. We assume its $gp points to the primary GOT. */ -static __inline__ ElfW(Addr) +static __always_inline ElfW(Addr) elf_machine_dynamic (void) { register ElfW(Addr) gp __asm__ ("$28"); @@ -192,15 +215,20 @@ elf_machine_dynamic (void) #endif /* Return the run-time load address of the shared object. */ -static __inline__ ElfW(Addr) +static __always_inline ElfW(Addr) elf_machine_load_address (void) { ElfW(Addr) addr; __asm__ (" .set noreorder\n" +# if !defined __mips_isa_rev || __mips_isa_rev < 6 " " STRINGXP (PTR_LA) " %0, 0f\n" " bltzal $0, 0f\n" " nop\n" "0: " STRINGXP (PTR_SUBU) " %0, $31, %0\n" +#else + "0: lapc $31, 0\n" + " " STRINGXP (PTR_SUBU) " %0, $31, %0\n" +#endif " .set reorder\n" : "=r" (addr) : /* No inputs */ @@ -208,7 +236,7 @@ elf_machine_load_address (void) return addr; } -static __inline__ void +static __always_inline void elf_machine_relative (ElfW(Addr) load_off, const ElfW(Addr) rel_addr, ElfW(Word) relative_count) { diff --git a/ldso/ldso/mips/elfinterp.c b/ldso/ldso/mips/elfinterp.c index 1b03d9412..6310c7735 100644 --- a/ldso/ldso/mips/elfinterp.c +++ b/ldso/ldso/mips/elfinterp.c @@ -30,6 +30,7 @@ #include "ldso.h" extern int _dl_runtime_resolve(void); +extern int _dl_runtime_pltresolve(void); #define OFFSET_GP_GOT 0x7ff0 @@ -55,7 +56,7 @@ unsigned long __dl_runtime_resolve(unsigned long sym_index, symname = strtab + sym->st_name; new_addr = (unsigned long) _dl_find_hash(symname, - tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); if (unlikely(!new_addr)) { _dl_dprintf (2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); @@ -83,6 +84,59 @@ unsigned long __dl_runtime_resolve(unsigned long sym_index, return new_addr; } +unsigned long +__dl_runtime_pltresolve(struct elf_resolve *tpnt, int reloc_entry) +{ + ELF_RELOC *this_reloc; + char *strtab; + ElfW(Sym) *symtab; + int symtab_index; + char *rel_addr; + char *new_addr; + char **got_addr; + unsigned long instr_addr; + char *symname; + + rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; + this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); + symtab_index = ELF_R_SYM(this_reloc->r_info); + + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; + symname = strtab + symtab[symtab_index].st_name; + + /* Address of the jump instruction to fix up. */ + instr_addr = ((unsigned long)this_reloc->r_offset + + (unsigned long)tpnt->loadaddr); + got_addr = (char **)instr_addr; + + /* Get the address of the GOT entry. 
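
elf_mips_got_from_gpreg above relies on the convention that $gp points 0x7ff0 bytes past the start of the GOT, so signed 16-bit displacements can reach entries on either side of it; recovering the GOT base is a single subtraction. Illustrative form:

#include <elf.h>

#define GP_TO_GOT_OFFSET 0x7ff0	/* same constant as OFFSET_GP_GOT */

static Elf32_Addr *got_from_gp(Elf32_Addr gp)
{
	return (Elf32_Addr *)(gp - GP_TO_GOT_OFFSET);
}
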
*/ + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); + if (unlikely(!new_addr)) { + _dl_dprintf(2, "%s: can't resolve symbol '%s' in lib '%s'.\n", _dl_progname, symname, tpnt->libname); + _dl_exit(1); + } + +#if defined (__SUPPORT_LD_DEBUG__) + if ((unsigned long)got_addr < 0x40000000) { + if (_dl_debug_bindings) { + _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname); + if (_dl_debug_detail) + _dl_dprintf(_dl_debug_file, + "\n\tpatched: %x ==> %x @ %x", + *got_addr, new_addr, got_addr); + } + } + if (!_dl_debug_nofixups) { + *got_addr = new_addr; + } +#else + *got_addr = new_addr; +#endif + + return (unsigned long)new_addr; +} + void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, unsigned long rel_addr, unsigned long rel_size) { @@ -91,10 +145,10 @@ void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, } int _dl_parse_relocation_information(struct dyn_elf *xpnt, - unsigned long rel_addr, unsigned long rel_size) + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { ElfW(Sym) *symtab; - ElfW(Rel) *rpnt; + ELF_RELOC *rpnt; char *strtab; unsigned long i; unsigned long *got; @@ -102,18 +156,21 @@ int _dl_parse_relocation_information(struct dyn_elf *xpnt, unsigned long symbol_addr; int reloc_type, symtab_index; struct elf_resolve *tpnt = xpnt->dyn; + char *symname = NULL; #if defined (__SUPPORT_LD_DEBUG__) unsigned long old_val=0; #endif + struct symbol_ref sym_ref; /* Now parse the relocation information */ rel_size = rel_size / sizeof(ElfW(Rel)); - rpnt = (ElfW(Rel) *) rel_addr; + rpnt = (ELF_RELOC *) rel_addr; symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; got = (unsigned long *) tpnt->dynamic_info[DT_PLTGOT]; + for (i = 0; i < rel_size; i++, rpnt++) { reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset); @@ -123,12 +180,85 @@ int _dl_parse_relocation_information(struct dyn_elf *xpnt, debug_sym(symtab,strtab,symtab_index); debug_reloc(symtab,strtab,rpnt); + symname = strtab + symtab[symtab_index].st_name; #if defined (__SUPPORT_LD_DEBUG__) if (reloc_addr) old_val = *reloc_addr; #endif + if (reloc_type == R_MIPS_JUMP_SLOT || reloc_type == R_MIPS_COPY) { + sym_ref.tpnt = NULL; + sym_ref.sym = &symtab[symtab_index]; + symbol_addr = (unsigned long)_dl_find_hash(symname, + scope, + tpnt, + elf_machine_type_class(reloc_type), &sym_ref); + if (unlikely(!symbol_addr && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) + return 1; + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } + } + if (!symtab_index) { + /* Relocs against STN_UNDEF are usually treated as using a + * symbol value of zero, and using the module containing the + * reloc itself. 
+ */ + symbol_addr = symtab[symtab_index].st_value; + } + switch (reloc_type) { +#if defined USE_TLS && USE_TLS +# if _MIPS_SIM == _MIPS_SIM_ABI64 + case R_MIPS_TLS_DTPMOD64: + case R_MIPS_TLS_DTPREL64: + case R_MIPS_TLS_TPREL64: +# else + case R_MIPS_TLS_DTPMOD32: + case R_MIPS_TLS_DTPREL32: + case R_MIPS_TLS_TPREL32: +# endif + { + struct elf_resolve *tls_tpnt = NULL; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; + + if (ELF_ST_BIND(symtab[symtab_index].st_info) != STB_LOCAL) { + symbol_addr = (unsigned long) _dl_find_hash(symname, scope, + tpnt, elf_machine_type_class(reloc_type), &sym_ref); + tls_tpnt = sym_ref.tpnt; + } + /* In case of a TLS reloc, tls_tpnt NULL means we have an 'anonymous' + symbol. This is the case for a static tls variable, so the lookup + module is just that one is referencing the tls variable. */ + if (!tls_tpnt) + tls_tpnt = tpnt; + + switch (reloc_type) { + case R_MIPS_TLS_DTPMOD64: + case R_MIPS_TLS_DTPMOD32: + if (tls_tpnt) + *(ElfW(Addr) *)reloc_addr = tls_tpnt->l_tls_modid; + break; + + case R_MIPS_TLS_DTPREL64: + case R_MIPS_TLS_DTPREL32: + *(ElfW(Addr) *)reloc_addr += + TLS_DTPREL_VALUE (symbol_addr); + break; + + case R_MIPS_TLS_TPREL32: + case R_MIPS_TLS_TPREL64: + CHECK_STATIC_TLS((struct link_map *)tls_tpnt); + *(ElfW(Addr) *)reloc_addr += + TLS_TPREL_VALUE (tls_tpnt, symbol_addr); + break; + } + + break; + } +#endif /* USE_TLS */ #if _MIPS_SIM == _MIPS_SIM_ABI64 case (R_MIPS_64 << 8) | R_MIPS_REL32: #else /* O32 || N32 */ @@ -148,6 +278,24 @@ int _dl_parse_relocation_information(struct dyn_elf *xpnt, *reloc_addr += (unsigned long) tpnt->loadaddr; } break; + case R_MIPS_JUMP_SLOT: + *reloc_addr = symbol_addr; + break; + case R_MIPS_COPY: + if (symbol_addr) { +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_move) + _dl_dprintf(_dl_debug_file, + "\n%s move %d bytes from %x to %x", + symname, symtab[symtab_index].st_size, + symbol_addr, reloc_addr); +#endif + + _dl_memcpy((char *)reloc_addr, + (char *)symbol_addr, + symtab[symtab_index].st_size); + } + break; case R_MIPS_NONE: break; default: @@ -155,22 +303,21 @@ int _dl_parse_relocation_information(struct dyn_elf *xpnt, _dl_dprintf(2, "\n%s: ",_dl_progname); if (symtab_index) - _dl_dprintf(2, "symbol '%s': ", strtab + symtab[symtab_index].st_name); + _dl_dprintf(2, "symbol '%s': ", symname); #if defined (__SUPPORT_LD_DEBUG__) - _dl_dprintf(2, "can't handle reloc type %s\n ", _dl_reltypes(reloc_type)); + _dl_dprintf(2, "can't handle reloc type '%s' in lib '%s'\n", _dl_reltypes(reloc_type), tpnt->libname); #else - _dl_dprintf(2, "can't handle reloc type %x\n", reloc_type); + _dl_dprintf(2, "can't handle reloc type %x in lib '%s'\n", reloc_type, tpnt->libname); #endif _dl_exit(1); } } - - } #if defined (__SUPPORT_LD_DEBUG__) - if (_dl_debug_reloc && _dl_debug_detail && reloc_addr) - _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); + if (_dl_debug_reloc && _dl_debug_detail && reloc_addr) + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif + } return 0; } @@ -209,12 +356,12 @@ void _dl_perform_mips_global_got_relocations(struct elf_resolve *tpnt, int lazy) } else { *got_entry = (unsigned long) _dl_find_hash(strtab + - sym->st_name, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + sym->st_name, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); } } else if (sym->st_shndx == SHN_COMMON) { *got_entry = (unsigned long) _dl_find_hash(strtab + - sym->st_name, tpnt->symbol_scope, 
tpnt, ELF_RTYPE_CLASS_PLT); + sym->st_name, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); } else if (ELF_ST_TYPE(sym->st_info) == STT_FUNC && *got_entry != sym->st_value && tmp_lazy) { @@ -225,8 +372,11 @@ void _dl_perform_mips_global_got_relocations(struct elf_resolve *tpnt, int lazy) *got_entry += (unsigned long) tpnt->loadaddr; } else { + struct symbol_ref sym_ref; + sym_ref.sym = sym; + sym_ref.tpnt = NULL; *got_entry = (unsigned long) _dl_find_hash(strtab + - sym->st_name, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + sym->st_name, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, &sym_ref); } got_entry++; diff --git a/ldso/ldso/mips/resolve.S b/ldso/ldso/mips/resolve.S index f5d988a80..d7951a1b4 100644 --- a/ldso/ldso/mips/resolve.S +++ b/ldso/ldso/mips/resolve.S @@ -112,3 +112,54 @@ _dl_runtime_resolve: .end _dl_runtime_resolve .previous +/* Assembler veneer called from the PLT header code when using the + non-PIC ABI. + + Code in each PLT entry puts the caller's return address into t7 ($15), + the PLT entry index into t8 ($24), the address of _dl_runtime_pltresolve + into t9 ($25) and the address of .got.plt into gp ($28). __dl_runtime_pltresolve + needs a0 ($4) to hold the link map and a1 ($5) to hold the index into + .rel.plt (== PLT entry index * 4). */ + + .text + .align 2 + .globl _dl_runtime_pltresolve + .type _dl_runtime_pltresolve,@function + .ent _dl_runtime_pltresolve +_dl_runtime_pltresolve: + .frame $29, 40, $31 + .set noreorder + # Save arguments and sp value in stack. + subu $29, 40 + lw $10, 4($28) + # Modify t9 ($25) so as to point .cpload instruction. + addiu $25, 12 + # Compute GP. + .cpload $25 + .set reorder + + /* Store function arguments from registers to stack */ + sw $15, 36($29) + sw $4, 16($29) + sw $5, 20($29) + sw $6, 24($29) + sw $7, 28($29) + + /* Setup functions args and call __dl_runtime_pltresolve. */ + move $4, $10 + sll $5, $24, 3 + jal __dl_runtime_pltresolve + + /* Restore function arguments from stack to registers */ + lw $31, 36($29) + lw $4, 16($29) + lw $5, 20($29) + lw $6, 24($29) + lw $7, 28($29) + + /* Do a tail call to the original function */ + addiu $29, 40 + move $25, $2 + jr $25 + .end _dl_runtime_pltresolve + .previous diff --git a/ldso/ldso/or1k/dl-debug.h b/ldso/ldso/or1k/dl-debug.h new file mode 100644 index 000000000..d925577cd --- /dev/null +++ b/ldso/ldso/or1k/dl-debug.h @@ -0,0 +1,53 @@ +/* OpenRISC 1000 shared library loader suppport + * + * Copyright (C) 2012 Stefan Kristansson + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. The name of the above contributors may not be + * used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
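
In the _dl_runtime_pltresolve veneer above, the PLT entry index arrives in $t8 and is scaled to a byte offset into .rel.plt before the C resolver is called (the sll by 3, since a REL entry is 8 bytes). The scaling as plain C, purely illustrative:

#include <elf.h>

static unsigned long plt_index_to_rel_offset(unsigned long plt_index)
{
	return plt_index * sizeof(Elf32_Rel);	/* 8 bytes: r_offset + r_info */
}
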
IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +static const char * const _dl_reltypes_tab[] = + { + "R_OR1K_NONE", + "R_OR1K_32", + "R_OR1K_16", + "R_OR1K_8", + "R_OR1K_LO_16_IN_INSN", + "R_OR1K_HI_16_IN_INSN", + "R_OR1K_INSN_REL_26", + "R_OR1K_GNU_VTENTRY", + "R_OR1K_GNU_VTINHERIT", + "R_OR1K_32_PCREL", + "R_OR1K_16_PCREL", + "R_OR1K_8_PCREL", + "R_OR1K_GOTPC_HI16", + "R_OR1K_GOTPC_LO16", + "R_OR1K_GOT16", + "R_OR1K_PLT26", + "R_OR1K_GOTOFF_HI16", + "R_OR1K_GOTOFF_LO16", + "R_OR1K_COPY", + "R_OR1K_GLOB_DAT", + "R_OR1K_JMP_SLOT", + "R_OR1K_RELATIVE", + }; diff --git a/ldso/ldso/or1k/dl-startup.h b/ldso/ldso/or1k/dl-startup.h new file mode 100644 index 000000000..3c99bcd5c --- /dev/null +++ b/ldso/ldso/or1k/dl-startup.h @@ -0,0 +1,106 @@ +/* Startup code for the OpenRISC 1000 platform, + based on microblaze implementation */ +/* + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +__asm__ ("\ + .text\n\ + .globl _start\n\ + .type _start,@function\n\ + .hidden _start\n\ +_start:\n\ + l.ori r3, r9, 0\n\ + l.ori r3, r1, 0\n\ + l.movhi r11, 0\n\ +1:\n\ + l.addi r3, r3, 4\n\ + l.lwz r12, 0(r3)\n\ + l.sfnei r12, 0\n\ + l.addi r11, r11, 1\n\ + l.bf 1b\n\ + l.nop\n\ + l.ori r3, r11, 0\n\ + l.ori r3, r1, 0\n\ + l.addi r11, r11, -1\n\ + /* store argument counter to stack */\n\ + l.sw 0(r3), r11\n\ + l.addi r1, r1, -24\n\ + l.sw 0(r1), r9\n\ +\n\ + l.jal .LPC0\n\ +#ifndef __OR1K_NODELAY__\n\ + l.nop\n\ +#endif\n\ + /* Load the PIC register */\n\ +.LPC0:\n\ + l.movhi r16, gotpchi(_GLOBAL_OFFSET_TABLE_+(.-.LPC0))\n\ + l.ori r16, r16, gotpclo(_GLOBAL_OFFSET_TABLE_+(.-.LPC0))\n\ + l.add r16, r16, r9\n\ +\n\ + l.jal _dl_start\n\ + l.nop\n\ + /* FALLTHRU */\n\ +\n\ + .globl _dl_start_user\n\ + .type _dl_start_user,@function\n\ +_dl_start_user:\n\ + l.movhi r12, gotoffhi(_dl_skip_args)\n\ + l.ori r12, r12, gotofflo(_dl_skip_args)\n\ + l.add r12, r12, r16\n\ + l.lwz r12, 0(r12)\n\ + l.lwz r3, 24(r1)\n\ +\n\ + l.movhi r9, gotoffhi(_dl_fini)\n\ + l.ori r9, r9, gotofflo(_dl_fini)\n\ + l.add r9, r9, r16\n\ +\n\ + l.addi r9, r9, -8\n\ + l.addi r1, r1, 24\n\ + l.jr r11\n\ + l.nop\n\ + .size _dl_start_user, . - _dl_start_user\n\ + .previous\n\ +"); +/* + * Get a pointer to the argv array. On many platforms this can be just + * the address of the first argument, on other platforms we need to + * do something a little more subtle here. 
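
GET_ARGV below leans on the System V startup-stack layout: the kernel leaves argc at the stack pointer, the argv pointers directly above it, a terminating NULL, then envp. A tiny sketch of why "address of argc plus one word" is the argv array:

/* ARGS is the address where argc was stored; the argv[] pointers
 * follow immediately, so argv is simply one word further up. */
static unsigned long *get_argv(unsigned long *args)
{
	return args + 1;	/* args[0] == argc, args[1] == argv[0] */
}
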
+ */ +#define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*) ARGS)+1) + +/* The ld.so library requires relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + +static __always_inline +void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr, + unsigned long symbol_addr, unsigned long load_addr, + attribute_unused Elf32_Sym *symtab) +{ + + switch (ELF_R_TYPE(rpnt->r_info)) + { + case R_OR1K_RELATIVE: + + *reloc_addr = load_addr + rpnt->r_addend; + break; + + default: + _dl_exit(1); + break; + + } + +} diff --git a/ldso/ldso/or1k/dl-syscalls.h b/ldso/ldso/or1k/dl-syscalls.h new file mode 100644 index 000000000..f40c4fd31 --- /dev/null +++ b/ldso/ldso/or1k/dl-syscalls.h @@ -0,0 +1 @@ +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/or1k/dl-sysdep.h b/ldso/ldso/or1k/dl-sysdep.h new file mode 100644 index 000000000..21ca028c8 --- /dev/null +++ b/ldso/ldso/or1k/dl-sysdep.h @@ -0,0 +1,105 @@ +/* elf reloc code for the or1k platform, based on glibc 2.3.6, dl-machine.h */ + +/* + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +/* Use reloca */ +#define ELF_USES_RELOCA + +#include <elf.h> + + +/* Initialise the GOT */ +#define INIT_GOT(GOT_BASE,MODULE) \ +do { \ + GOT_BASE[2] = (unsigned long) _dl_linux_resolve; \ + GOT_BASE[1] = (unsigned long) MODULE; \ +} while(0) + +/* Here we define the magic numbers that this dynamic loader should accept */ + +#define MAGIC1 EM_OR1K +#undef MAGIC2 +/* Used for error messages */ +#define ELF_TARGET "or1k" + +#define elf_machine_type_class(type) \ + (((type) == R_OR1K_JMP_SLOT) * ELF_RTYPE_CLASS_PLT \ + | ((type) == R_OR1K_COPY) * ELF_RTYPE_CLASS_COPY) + +static inline Elf32_Addr * +or1k_get_got (void) +{ + Elf32_Addr *got; + Elf32_Addr linkreg; + __asm__("l.ori %0, r9, 0\n" + "l.jal .LPC1\n" +#ifndef __OR1K_NODELAY__ + "l.nop\n" +#endif + ".LPC1:\n" + "l.movhi %1, gotpchi(_GLOBAL_OFFSET_TABLE_+(.-.LPC1))\n" + "l.ori %1, %1, gotpclo(_GLOBAL_OFFSET_TABLE_+(.-.LPC1))\n" + "l.add %1, %1, r9\n" + "l.ori r9, %0, 0\n" + : "=r" (linkreg), "=r" (got)); + return got; +} + +/* Return the link-time address of _DYNAMIC. Conveniently, this is the + first element of the GOT. */ +static inline Elf32_Addr +elf_machine_dynamic (void) +{ + Elf32_Addr *got = or1k_get_got(); + return *got; +} + + +/* Return the run-time load address of the shared object. */ +static inline Elf32_Addr +elf_machine_load_address (void) +{ + /* Compute the difference between the runtime address of _DYNAMIC as seen + by a GOTOFF reference, and the link-time address found in the special + unrelocated first GOT entry. 
*/ + Elf32_Addr dyn; + Elf32_Addr *got = or1k_get_got(); + + __asm__ __volatile__ ( + "l.movhi %0, gotoffhi(_DYNAMIC);" + "l.ori %0, %0, gotofflo(_DYNAMIC);" + "l.add %0, %0, %1;" + : "=r"(dyn), "=r"(got) + ); + return dyn - *got; +} + + + +static __always_inline void +elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, + Elf32_Word relative_count) +{ + Elf32_Rela * rpnt = (void *) rel_addr; + --rpnt; + do { + Elf32_Addr *const reloc_addr = (void *) (load_off + + (++rpnt)->r_offset); + + *reloc_addr += load_off; + } while (--relative_count); +} diff --git a/ldso/ldso/or1k/elfinterp.c b/ldso/ldso/or1k/elfinterp.c new file mode 100644 index 000000000..928e95ba1 --- /dev/null +++ b/ldso/ldso/or1k/elfinterp.c @@ -0,0 +1,333 @@ +/* vi: set sw=4 ts=4: */ +/* OpenRISC 1000 ELF shared library loader suppport + * + * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald, + * David Engel, Hongjiu Lu and Mitch D'Souza + * Copyright (C) 2001-2004 Erik Andersen + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. The name of the above contributors may not be + * used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "ldso.h" + +/* Program to load an ELF binary on a linux system, and run it. + References to symbols in sharable libraries can be resolved by either + an ELF sharable library or a linux style of shared library. */ + +/* Disclaimer: I have never seen any AT&T source code for SVr4, nor have + I ever taken any courses on internals. This program was developed using + information available through the book "UNIX SYSTEM V RELEASE 4, + Programmers guide: Ansi C and Programming Support Tools", which did + a more than adequate job of explaining everything required to get this + working. */ + +extern int _dl_linux_resolve(void); + +unsigned long +_dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) +{ + ELF_RELOC *this_reloc; + char *strtab; + ElfW(Sym) *symtab; + int symtab_index; + char *rel_addr; + char *new_addr; + char **got_addr; + ElfW(Addr) instr_addr; + char *symname; + + rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; + this_reloc = (ELF_RELOC *)(rel_addr + reloc_entry); + symtab_index = ELF_R_SYM(this_reloc->r_info); + + symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; + symname = strtab + symtab[symtab_index].st_name; + + /* Address of the jump instruction to fix up. 
*/ + instr_addr = (this_reloc->r_offset + tpnt->loadaddr); + got_addr = (char **)instr_addr; + + /* Get the address of the GOT entry. */ + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); + if (unlikely(!new_addr)) { + _dl_dprintf(2, "%s: can't resolve symbol '%s' in lib '%s'.\n", + _dl_progname, symname, tpnt->libname); + _dl_exit(1); + } + +#if defined (__SUPPORT_LD_DEBUG__) + if ((unsigned long)got_addr < 0x40000000) { + if (_dl_debug_bindings) { + _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname); + if (_dl_debug_detail) + _dl_dprintf(_dl_debug_file, + "\tpatched: %x ==> %x @ %x\n", + *got_addr, new_addr, got_addr); + } + } + if (!_dl_debug_nofixups) +#endif + *got_addr = new_addr; + + return (unsigned long)new_addr; +} + +static int +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, + unsigned long rel_addr, unsigned long rel_size, + int (*reloc_fnc)(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) +{ + unsigned int i; + char *strtab; + ElfW(Sym) *symtab; + ELF_RELOC *rpnt; + int symtab_index; + + /* Parse the relocation information. */ + rpnt = (ELF_RELOC *)rel_addr; + rel_size /= sizeof(ELF_RELOC); + + symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; + strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; + + for (i = 0; i < rel_size; i++, rpnt++) { + int res; + + symtab_index = ELF_R_SYM(rpnt->r_info); + + debug_sym(symtab, strtab, symtab_index); + debug_reloc(symtab, strtab, rpnt); + + res = reloc_fnc(tpnt, scope, rpnt, symtab, strtab); + + if (res == 0) + continue; + + _dl_dprintf(2, "\n%s: ", _dl_progname); + + if (symtab_index) + _dl_dprintf(2, "symbol '%s': ", + strtab + symtab[symtab_index].st_name); + + if (unlikely(res < 0)) { + int reloc_type = ELF_R_TYPE(rpnt->r_info); + + _dl_dprintf(2, "can't handle reloc type " +#if defined (__SUPPORT_LD_DEBUG__) + "%s\n", _dl_reltypes(reloc_type)); +#else + "%x\n", reloc_type); +#endif + _dl_exit(-res); + } else if (unlikely(res > 0)) { + _dl_dprintf(2, "can't resolve symbol\n"); + return res; + } + } + + return 0; +} + +static int +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) +{ + int reloc_type; + int symtab_index; + char *symname; +#if defined USE_TLS && USE_TLS + struct elf_resolve *tls_tpnt; +#endif + struct symbol_ref sym_ref; + ElfW(Addr) *reloc_addr; + ElfW(Addr) symbol_addr; +#if defined (__SUPPORT_LD_DEBUG__) + ElfW(Addr) old_val; +#endif + + struct unaligned { + Elf32_Addr x; + } __attribute__ ((packed, may_alias)); + + reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; + symbol_addr = 0; + symname = strtab + sym_ref.sym->st_name; + + if (symtab_index) { + symbol_addr = (ElfW(Addr))_dl_find_hash(symname, scope, tpnt, + elf_machine_type_class(reloc_type), &sym_ref); + /* + * We want to allow undefined references to weak symbols - this + * might have been intentional. We should not be linking local + * symbols here, so all bases should be covered. + */ + if (unlikely(!symbol_addr && (ELF_ST_TYPE(sym_ref.sym->st_info) != STT_TLS) + && (ELF_ST_BIND(sym_ref.sym->st_info) != STB_WEAK))) { + /* This may be non-fatal if called from dlopen. 
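
The packed/may_alias wrapper struct above is a GCC idiom for storing through a possibly misaligned pointer: the attributes forbid the compiler from assuming natural alignment or strict aliasing, so it emits accesses that are safe on targets that fault on misaligned words. A stand-alone sketch of the same trick:

#include <stdint.h>

struct unaligned_word {
	uint32_t x;
} __attribute__ ((packed, may_alias));

static void store_unaligned(void *where, uint32_t value)
{
	((struct unaligned_word *)where)->x = value;
}
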
*/ + return 1; + } +#if defined USE_TLS && USE_TLS + tls_tpnt = sym_ref.tpnt; +#endif + } else { + /* Relocs against STN_UNDEF are usually treated as using a + * symbol value of zero, and using the module containing the + * reloc itself. */ + symbol_addr = sym_ref.sym->st_value; +#if defined USE_TLS && USE_TLS + tls_tpnt = tpnt; +#endif + } + + +#if defined (__SUPPORT_LD_DEBUG__) + if (reloc_addr) { + old_val = ((struct unaligned *)reloc_addr)->x; + } else { + old_val = 0; + } +#endif + + switch (reloc_type) { + case R_OR1K_NONE: + break; + + case R_OR1K_8: + case R_OR1K_16: + case R_OR1K_32: + /* Support relocations on mis-aligned offsets. */ + ((struct unaligned *)reloc_addr)->x = symbol_addr + + rpnt->r_addend; + break; + + case R_OR1K_8_PCREL: + case R_OR1K_16_PCREL: + case R_OR1K_32_PCREL: + case R_OR1K_INSN_REL_26: + *reloc_addr = symbol_addr + rpnt->r_addend; + break; + + case R_OR1K_GLOB_DAT: + case R_OR1K_JMP_SLOT: + *reloc_addr = symbol_addr + rpnt->r_addend; + break; +/* Handled by elf_machine_relative */ + case R_OR1K_RELATIVE: + *reloc_addr = (unsigned long)tpnt->loadaddr + rpnt->r_addend; + break; + + case R_OR1K_COPY: + if (symbol_addr) { +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_move) + _dl_dprintf(_dl_debug_file, + "\t%s move %d bytes from %x to %x\n", + symname, sym_ref.sym->st_size, + symbol_addr, reloc_addr); +#endif + + _dl_memcpy((char *)reloc_addr, + (char *)symbol_addr, + sym_ref.sym->st_size); + } +#if defined (__SUPPORT_LD_DEBUG__) + else + _dl_dprintf(_dl_debug_file, "no symbol_addr to copy !?\n"); +#endif + break; + + default: + return -1; /* Calls _dl_exit(1). */ + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_reloc && _dl_debug_detail) + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", + old_val, ((struct unaligned *)reloc_addr)->x, + reloc_addr); +#endif + + return 0; +} + +static int +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) +{ + int reloc_type; + int symtab_index; + ElfW(Addr) *reloc_addr; +#if defined (__SUPPORT_LD_DEBUG__) + ElfW(Addr) old_val; +#endif + + (void)scope; + symtab_index = ELF_R_SYM(rpnt->r_info); + (void)strtab; + + reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + +#if defined (__SUPPORT_LD_DEBUG__) + old_val = *reloc_addr; +#endif + + switch (reloc_type) { + case R_OR1K_NONE: + break; + case R_OR1K_JMP_SLOT: + *reloc_addr += (unsigned long)tpnt->loadaddr; + break; + default: + _dl_exit(1); + } + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug_reloc && _dl_debug_detail) + _dl_dprintf(_dl_debug_file, "\tpatched_lazy: %x ==> %x @ %x\n", + old_val, *reloc_addr, reloc_addr); +#endif + + return 0; +} + +void +_dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, + unsigned long rel_addr, unsigned long rel_size) +{ + (void)_dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc); +} + +int +_dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) +{ + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); +} diff --git a/ldso/ldso/or1k/resolve.S b/ldso/ldso/or1k/resolve.S new file mode 100644 index 000000000..4a156d529 --- /dev/null +++ b/ldso/ldso/or1k/resolve.S @@ -0,0 +1,54 @@ +/* This code is used in dl-runtime.c to call the `fixup' function + and then redirect to the address it returns. 
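The resolver stub that follows only marshals two values, the module's link-map pointer and the byte offset of the PLT relocation, into argument registers, calls the C resolver, and jumps to whatever address comes back. A simplified C model of that contract, with stand-in types (the real work is done by _dl_linux_resolver above; names here are illustrative):

    #include <elf.h>
    #include <stdint.h>

    /* Stand-ins for the real loader structures. */
    struct fake_module {
        uintptr_t load_addr;
        const Elf32_Rela *jmprel;   /* DT_JMPREL */
    };

    static uintptr_t
    lookup_symbol(const struct fake_module *m, uint32_t symidx)
    {
        (void)m; (void)symidx;
        return 0;   /* placeholder: the real loader walks the hash tables */
    }

    /* What the trampoline effectively does on the first call through a
     * PLT entry: resolve the symbol, patch the GOT slot so later calls
     * go straight to the target, and hand the address back to the stub. */
    static uintptr_t
    resolve_plt(struct fake_module *m, unsigned long reloc_offset)
    {
        const Elf32_Rela *r =
            (const Elf32_Rela *)((const char *)m->jmprel + reloc_offset);
        uintptr_t *got_slot = (uintptr_t *)(m->load_addr + r->r_offset);
        uintptr_t target = lookup_symbol(m, ELF32_R_SYM(r->r_info));

        *got_slot = target;   /* subsequent calls bypass the resolver */
        return target;        /* the stub branches here immediately */
    }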
*/ +/* We assume that R11 contain relocation offset and R12 contains + link_map (_DYNAMIC). This must be consistent with the JUMP_SLOT + layout generated by binutils. */ + +/* Based on microblaze implementation */ +/* + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +.text +.align 4 +.globl _dl_linux_resolver +.globl _dl_linux_resolve +.type _dl_linux_resolve,@function + +_dl_linux_resolve: + l.addi r1, r1, -32 + l.sw 0(r1), r9 + /* save function arguments */ + l.sw 8(r1), r3 + l.sw 12(r1), r4 + l.sw 16(r1), r5 + l.sw 20(r1), r6 + l.sw 24(r1), r7 + l.sw 28(r1), r8 + l.ori r3, r12, 0 + l.ori r4, r11, 0 + l.jal _dl_linux_resolver + l.nop + l.lwz r8, 28(r1) + l.lwz r7, 24(r1) + l.lwz r6, 20(r1) + l.lwz r5, 16(r1) + l.lwz r4, 12(r1) + l.lwz r3, 8(r1) + l.lwz r9, 0(r1) + l.addi r1, r1, 32 + l.jr r11 + l.nop + .size _dl_linux_resolve, . - _dl_linux_resolve diff --git a/ldso/ldso/powerpc/dl-debug.h b/ldso/ldso/powerpc/dl-debug.h index cf203d25e..720536e72 100644 --- a/ldso/ldso/powerpc/dl-debug.h +++ b/ldso/ldso/powerpc/dl-debug.h @@ -29,7 +29,7 @@ * SUCH DAMAGE. */ -static const char *_dl_reltypes_tab[] = +static const char * const _dl_reltypes_tab[] = { "R_PPC_NONE", "R_PPC_ADDR32", "R_PPC_ADDR24", "R_PPC_ADDR16", "R_PPC_ADDR16_LO", "R_PPC_ADDR16_HI", "R_PPC_ADDR16_HA", "R_PPC_ADDR14", "R_PPC_ADDR14_BRTAKEN", "R_PPC_ADDR14_BRNTAKEN", diff --git a/ldso/ldso/powerpc/dl-startup.h b/ldso/ldso/powerpc/dl-startup.h index 50e72c3ee..8b2a517e2 100644 --- a/ldso/ldso/powerpc/dl-startup.h +++ b/ldso/ldso/powerpc/dl-startup.h @@ -8,6 +8,7 @@ __asm__( " .text\n" " .globl _start\n" " .type _start,@function\n" + " .hidden _start\n" "_start:\n" " mr 3,1\n" /* Pass SP to _dl_start in r3 */ " li 0,0\n" @@ -60,7 +61,7 @@ __asm__( /* * Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*) ARGS)+1) @@ -73,7 +74,7 @@ __asm__( * load address. 
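At this point the loader is still relocating itself, so the PERFORM_BOOTSTRAP_RELOC definition that follows may rely only on values already in registers or on the stack: no GOT-resolved globals, no PLT calls. A hedged sketch of the loop a port conceptually runs over its own .rela.dyn during bootstrap (names are illustrative; the shared startup code provides the real driver):

    #include <elf.h>
    #include <stdint.h>

    /* Illustrative bootstrap loop: walk our own RELA entries and patch
     * each slot using only local variables. */
    static void
    bootstrap_self_relocate(uintptr_t load_addr, Elf32_Rela *rela,
                            unsigned long count, Elf32_Sym *symtab)
    {
        for (unsigned long i = 0; i < count; i++, rela++) {
            Elf32_Addr *reloc = (Elf32_Addr *)(load_addr + rela->r_offset);
            Elf32_Sym *sym = &symtab[ELF32_R_SYM(rela->r_info)];
            Elf32_Addr symval = load_addr + sym->st_value;

            (void)symval;
            /* PERFORM_BOOTSTRAP_RELOC(rela, reloc, symval, load_addr, sym);
             * shown inline here for the common case only: */
            if (ELF32_R_TYPE(rela->r_info) == R_PPC_RELATIVE)
                *reloc = (Elf32_Word)load_addr + rela->r_addend;
            /* the other types are handled by the macro defined below */
        }
    }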
*/ #define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \ - {int type=ELF32_R_TYPE((RELP)->r_info); \ + {int type=ELF_R_TYPE((RELP)->r_info); \ Elf32_Addr finaladdr=(SYMBOL)+(RELP)->r_addend;\ if (type==R_PPC_RELATIVE) { \ *REL=(Elf32_Word)(LOAD)+(RELP)->r_addend;\ @@ -84,6 +85,6 @@ __asm__( *REL=OPCODE_B(delta); \ PPC_DCBST(REL); PPC_SYNC; PPC_ICBI(REL);\ } else { \ - _dl_exit(100+ELF32_R_TYPE((RELP)->r_info));\ + _dl_exit(100+ELF_R_TYPE((RELP)->r_info));\ } \ } diff --git a/ldso/ldso/powerpc/dl-syscalls.h b/ldso/ldso/powerpc/dl-syscalls.h index 996bb87c6..f40c4fd31 100644 --- a/ldso/ldso/powerpc/dl-syscalls.h +++ b/ldso/ldso/powerpc/dl-syscalls.h @@ -1,6 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/powerpc/dl-sysdep.h b/ldso/ldso/powerpc/dl-sysdep.h index 768482272..a665d4e75 100644 --- a/ldso/ldso/powerpc/dl-sysdep.h +++ b/ldso/ldso/powerpc/dl-sysdep.h @@ -67,11 +67,6 @@ struct elf_resolve; extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry); void _dl_init_got(unsigned long *lpnt,struct elf_resolve *tpnt); -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 - /* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so PLT entries should not be allowed to define the value. ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one @@ -82,6 +77,8 @@ void _dl_init_got(unsigned long *lpnt,struct elf_resolve *tpnt); #define elf_machine_type_class(type) \ ((((type) == R_PPC_JMP_SLOT \ || (type) == R_PPC_REL24 \ + || ((type) >= R_PPC_DTPMOD32 /* contiguous TLS */ \ + && (type) <= R_PPC_DTPREL32) \ || (type) == R_PPC_ADDR24) * ELF_RTYPE_CLASS_PLT) \ | (((type) == R_PPC_COPY) * ELF_RTYPE_CLASS_COPY)) @@ -90,7 +87,7 @@ void _dl_init_got(unsigned long *lpnt,struct elf_resolve *tpnt); #define ELF_MACHINE_PLTREL_OVERLAP 1 /* Return the value of the GOT pointer. */ -static __inline__ Elf32_Addr * __attribute__ ((const)) +static __always_inline Elf32_Addr * __attribute__ ((const)) ppc_got (void) { Elf32_Addr *got; @@ -109,14 +106,14 @@ ppc_got (void) /* Return the link-time address of _DYNAMIC, stored as the first value in the GOT. */ -static __inline__ Elf32_Addr __attribute__ ((const)) +static __always_inline Elf32_Addr __attribute__ ((const)) elf_machine_dynamic (void) { return *ppc_got(); } /* Return the run-time load address of the shared object. 
*/ -static __inline__ Elf32_Addr __attribute__ ((const)) +static __always_inline Elf32_Addr __attribute__ ((const)) elf_machine_load_address (void) { Elf32_Addr *branchaddr; @@ -164,7 +161,7 @@ elf_machine_load_address (void) return runtime_dynamic - elf_machine_dynamic (); } -static __inline__ void +static __always_inline void elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { diff --git a/ldso/ldso/powerpc/elfinterp.c b/ldso/ldso/powerpc/elfinterp.c index eeb325092..81587a6af 100644 --- a/ldso/ldso/powerpc/elfinterp.c +++ b/ldso/ldso/powerpc/elfinterp.c @@ -30,6 +30,8 @@ */ #include "ldso.h" +#define TLS_DTV_OFFSET 0x8000 +#define TLS_TP_OFFSET 0x7000 extern int _dl_linux_resolve(void); @@ -107,44 +109,37 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) { ELF_RELOC *this_reloc; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rel_addr; int symtab_index; char *symname; - Elf32_Addr *reloc_addr; - Elf32_Addr finaladdr; + ElfW(Addr) *reloc_addr; + ElfW(Addr) finaladdr; Elf32_Sword delta; rel_addr = (ELF_RELOC *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (void *)rel_addr + reloc_entry; - symtab_index = ELF32_R_SYM(this_reloc->r_info); + symtab_index = ELF_R_SYM(this_reloc->r_info); - symtab = (Elf32_Sym *)tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; debug_sym(symtab,strtab,symtab_index); debug_reloc(symtab,strtab,this_reloc); -#if defined (__SUPPORT_LD_DEBUG__) - if (unlikely(ELF32_R_TYPE(this_reloc->r_info) != R_PPC_JMP_SLOT)) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocation\n", _dl_progname); - _dl_exit(1); - } -#endif - /* Address of dump instruction to fix up */ - reloc_addr = (Elf32_Addr *) (tpnt->loadaddr + this_reloc->r_offset); + reloc_addr = (ElfW(Addr) *) (tpnt->loadaddr + this_reloc->r_offset); #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\n\tResolving symbol %s %x --> ", symname, (Elf32_Addr)reloc_addr); + _dl_dprintf(_dl_debug_file, "\n\tResolving symbol %s %x --> ", symname, (ElfW(Addr))reloc_addr); #endif /* Get the address of the GOT entry */ - finaladdr = (Elf32_Addr) _dl_find_hash(symname, - tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + finaladdr = (ElfW(Addr)) _dl_find_hash(symname, + &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); if (unlikely(!finaladdr)) { _dl_dprintf(2, "%s: can't resolve symbol '%s' in lib '%s'.\n", _dl_progname, symname, tpnt->libname); _dl_exit(1); @@ -164,15 +159,15 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) *reloc_addr = OPCODE_BA (finaladdr); } else { /* Warning: we don't handle double-sized PLT entries */ - Elf32_Word *plt, *data_words, index, offset; + Elf32_Word *plt, *data_words, idx, offset; plt = (Elf32_Word *)tpnt->dynamic_info[DT_PLTGOT]; offset = reloc_addr - plt; - index = (offset - PLT_INITIAL_ENTRY_WORDS)/2; + idx = (offset - PLT_INITIAL_ENTRY_WORDS)/2; data_words = (Elf32_Word *)tpnt->data_words; reloc_addr += 1; - data_words[index] = finaladdr; + data_words[idx] = finaladdr; PPC_SYNC; *reloc_addr = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS - (offset+1)) * 4); } @@ -187,50 +182,64 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) } static __inline__ int -_dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, - ELF_RELOC *rpnt, 
Elf32_Sym *symtab, char *strtab) +_dl_do_reloc (struct elf_resolve *tpnt,struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; - char *symname; - Elf32_Addr *reloc_addr; - Elf32_Addr finaladdr; - + struct symbol_ref sym_ref; + ElfW(Addr) *reloc_addr; + ElfW(Addr) finaladdr; + struct elf_resolve *tls_tpnt = NULL; unsigned long symbol_addr; + char *symname; #if defined (__SUPPORT_LD_DEBUG__) unsigned long old_val; #endif - reloc_addr = (Elf32_Addr *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); + symbol_addr = tpnt->loadaddr; /* For R_PPC_RELATIVE */ - symtab_index = ELF32_R_SYM(rpnt->r_info); - symname = strtab + symtab[symtab_index].st_name; + reloc_addr = (ElfW(Addr) *)(intptr_t) (symbol_addr + (unsigned long) rpnt->r_offset); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; + symname = strtab + sym_ref.sym->st_name; if (symtab_index) { symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt, - elf_machine_type_class(reloc_type)); + elf_machine_type_class(reloc_type), &sym_ref); /* We want to allow undefined references to weak symbols - this might * have been intentional. We should not be linking local symbols * here, so all bases should be covered. */ - if (unlikely(!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) + if (unlikely(!symbol_addr + && (ELF_ST_TYPE(sym_ref.sym->st_info) != STT_TLS + && ELF_ST_BIND(sym_ref.sym->st_info) != STB_WEAK))) return 1; + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } + tls_tpnt = sym_ref.tpnt; + } else { + symbol_addr = sym_ref.sym->st_value; + tls_tpnt = tpnt; } #if defined (__SUPPORT_LD_DEBUG__) old_val = *reloc_addr; #endif - finaladdr = (Elf32_Addr) (symbol_addr + rpnt->r_addend); + finaladdr = (ElfW(Addr)) (symbol_addr + rpnt->r_addend); switch (reloc_type) { case R_PPC_RELATIVE: case R_PPC_ADDR32: case R_PPC_GLOB_DAT: *reloc_addr = finaladdr; - goto out_nocode; /* No code code modified */ + goto out_nocode; /* No code modified */ case R_PPC_JMP_SLOT: { if (tpnt->dynamic_info[DT_PPC_GOT_IDX] != 0) { *reloc_addr = finaladdr; - goto out_nocode; /* No code code modified */ + goto out_nocode; /* No code modified */ } else { Elf32_Sword delta = finaladdr - (Elf32_Word)reloc_addr; if (delta<<6>>6 == delta) { @@ -239,15 +248,15 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, *reloc_addr = OPCODE_BA (finaladdr); } else { /* Warning: we don't handle double-sized PLT entries */ - Elf32_Word *plt, *data_words, index, offset; + Elf32_Word *plt, *data_words, idx, offset; plt = (Elf32_Word *)tpnt->dynamic_info[DT_PLTGOT]; offset = reloc_addr - plt; - index = (offset - PLT_INITIAL_ENTRY_WORDS)/2; + idx = (offset - PLT_INITIAL_ENTRY_WORDS)/2; data_words = (Elf32_Word *)tpnt->data_words; - data_words[index] = finaladdr; - reloc_addr[0] = OPCODE_LI(11,index*4); + data_words[idx] = finaladdr; + reloc_addr[0] = OPCODE_LI(11,idx*4); reloc_addr[1] = OPCODE_B((PLT_LONGBRANCH_ENTRY_WORDS - (offset+1)) * 4); /* instructions were modified */ @@ -262,11 +271,11 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_move) _dl_dprintf(_dl_debug_file,"\n%s move %x bytes from %x to %x", - symname, symtab[symtab_index].st_size, + symname, sym_ref.sym->st_size, 
symbol_addr, reloc_addr); #endif - _dl_memcpy((char *) reloc_addr, (char *) finaladdr, symtab[symtab_index].st_size); - goto out_nocode; /* No code code modified */ + _dl_memcpy((char *) reloc_addr, (char *) finaladdr, sym_ref.sym->st_size); + goto out_nocode; /* No code modified */ case R_PPC_ADDR16_HA: finaladdr += 0x8000; /* fall through. */ case R_PPC_ADDR16_HI: @@ -274,6 +283,19 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, case R_PPC_ADDR16_LO: *(short *)reloc_addr = finaladdr; break; +#if USE_TLS + case R_PPC_DTPMOD32: + *reloc_addr = tls_tpnt->l_tls_modid; + break; + case R_PPC_DTPREL32: + /* During relocation all TLS symbols are defined and used. + Therefore the offset is already correct. */ + *reloc_addr = finaladdr - TLS_DTV_OFFSET; + break; + case R_PPC_TPREL32: + *reloc_addr = tls_tpnt->l_tls_offset + finaladdr - TLS_TP_OFFSET; + break; +#endif case R_PPC_REL24: #if 0 { @@ -292,7 +314,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, return -1; #endif case R_PPC_NONE: - goto out_nocode; /* No code code modified */ + goto out_nocode; /* No code modified */ default: _dl_dprintf(2, "%s: can't handle reloc type ", _dl_progname); #if defined (__SUPPORT_LD_DEBUG__) @@ -311,7 +333,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, out_nocode: #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val, *reloc_addr, reloc_addr); + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif return 0; } @@ -326,11 +348,11 @@ void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, plt = (Elf32_Word *)tpnt->dynamic_info[DT_PLTGOT]; if (tpnt->dynamic_info[DT_PPC_GOT_IDX] != 0) { /* Secure PLT */ - Elf32_Addr *got = (Elf32_Addr *)tpnt->dynamic_info[DT_PPC_GOT_IDX]; + ElfW(Addr) *got = (ElfW(Addr) *)tpnt->dynamic_info[DT_PPC_GOT_IDX]; Elf32_Word dlrr = (Elf32_Word) _dl_linux_resolve; - got[1] = (Elf32_Addr) dlrr; - got[2] = (Elf32_Addr) tpnt; + got[1] = (ElfW(Addr)) dlrr; + got[2] = (ElfW(Addr)) tpnt; /* Relocate everything in .plt by the load address offset. 
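The two constants introduced at the top of this file encode the PowerPC TLS ABI biases: DTV-relative offsets are stored biased by -0x8000 and thread-pointer-relative offsets by -0x7000, so the generated code can reach the whole block with signed 16-bit displacements. A small stand-alone restatement of the three stores performed by the R_PPC_DTPMOD32 / R_PPC_DTPREL32 / R_PPC_TPREL32 cases added above (hypothetical inputs; field names mirror l_tls_modid and l_tls_offset used in this patch):

    #include <elf.h>
    #include <stdint.h>

    #define TLS_DTV_OFFSET 0x8000
    #define TLS_TP_OFFSET  0x7000

    /* Hypothetical per-module TLS facts as the loader would know them. */
    struct tls_module {
        unsigned long modid;        /* l_tls_modid */
        unsigned long tls_offset;   /* l_tls_offset: static TLS block offset */
    };

    static void
    apply_ppc_tls(Elf32_Addr *slot, int type, const struct tls_module *mod,
                  Elf32_Addr sym_value, Elf32_Sword addend)
    {
        Elf32_Addr finaladdr = sym_value + addend;

        switch (type) {
        case R_PPC_DTPMOD32:     /* which module's TLS block to use */
            *slot = mod->modid;
            break;
        case R_PPC_DTPREL32:     /* offset inside that block, DTV-biased */
            *slot = finaladdr - TLS_DTV_OFFSET;
            break;
        case R_PPC_TPREL32:      /* static offset from the thread pointer */
            *slot = mod->tls_offset + finaladdr - TLS_TP_OFFSET;
            break;
        }
    }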
*/ while (num_plt_entries-- != 0) @@ -369,14 +391,14 @@ void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, } static __inline__ int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc) (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)) + int (*reloc_fnc) (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rpnt; int symtab_index; @@ -384,13 +406,13 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, rpnt = (ELF_RELOC *)(intptr_t)rel_addr; rel_size = rel_size / sizeof(ELF_RELOC); - symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; for (i = 0; i < rel_size; i++, rpnt++) { int res; - symtab_index = ELF32_R_SYM(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); debug_sym(symtab,strtab,symtab_index); debug_reloc(symtab,strtab,rpnt); @@ -406,7 +428,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, if (unlikely(res <0)) { - int reloc_type = ELF32_R_TYPE(rpnt->r_info); + int reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) _dl_dprintf(2, "can't handle reloc type '%s' in lib '%s'\n", _dl_reltypes(reloc_type), tpnt->libname); #else @@ -424,7 +446,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, } int _dl_parse_relocation_information(struct dyn_elf *rpnt, - unsigned long rel_addr, unsigned long rel_size) + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/powerpc/resolve.S b/ldso/ldso/powerpc/resolve.S index 03c6a79b8..c83337ccd 100644 --- a/ldso/ldso/powerpc/resolve.S +++ b/ldso/ldso/powerpc/resolve.S @@ -11,19 +11,19 @@ .type _dl_linux_resolve,@function _dl_linux_resolve: -// We need to save the registers used to pass parameters, and register 0, -// which is used by _mcount; the registers are saved in a stack frame. +/* We need to save the registers used to pass parameters, and register 0, + which is used by _mcount; the registers are saved in a stack frame. */ stwu 1,-64(1) stw 0,12(1) stw 3,16(1) stw 4,20(1) -// The code that calls this has put parameters for 'fixup' in r12 and r11. +/* The code that calls this has put parameters for 'fixup' in r12 and r11. */ mr 3,12 stw 5,24(1) mr 4,11 stw 6,28(1) mflr 0 -// We also need to save some of the condition register fields. +/* We also need to save some of the condition register fields. */ stw 7,32(1) stw 0,48(1) stw 8,36(1) @@ -32,9 +32,9 @@ _dl_linux_resolve: stw 10,44(1) stw 0,8(1) bl _dl_linux_resolver@local -// 'fixup' returns the address we want to branch to. +/* 'fixup' returns the address we want to branch to. */ mtctr 3 -// Put the registers back... +/* Put the registers back... */ lwz 0,48(1) lwz 10,44(1) lwz 9,40(1) @@ -48,7 +48,7 @@ _dl_linux_resolve: lwz 4,20(1) lwz 3,16(1) lwz 0,12(1) -// ...unwind the stack frame, and jump to the PLT entry we updated. +/* ...unwind the stack frame, and jump to the PLT entry we updated. 
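The `delta<<6>>6 == delta` test used in the PowerPC PLT patching above checks whether a branch displacement fits the 26 signed bits available to the `b`/`ba` instructions (24 stored bits plus two implied zero bits); only then can the slot be rewritten as a single direct branch, otherwise the long-branch path through the PLT data words is taken. A stand-alone version of that range check (sketch only):

    #include <stdint.h>

    /* Mirrors the loader's idiom; note that left-shifting a negative
     * signed value is technically implementation-defined in C. */
    static int
    fits_in_26bit_signed(int32_t delta)
    {
        return ((delta << 6) >> 6) == delta;
    }

    /* Equivalent check written without signed shifts. */
    static int
    fits_in_26bit_signed_portable(int32_t delta)
    {
        return delta >= -(1 << 25) && delta < (1 << 25);
    }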
*/ addi 1,1,64 bctr diff --git a/ldso/ldso/sh/dl-debug.h b/ldso/ldso/sh/dl-debug.h index e862da1ee..ac442bf35 100644 --- a/ldso/ldso/sh/dl-debug.h +++ b/ldso/ldso/sh/dl-debug.h @@ -28,7 +28,7 @@ * SUCH DAMAGE. */ -static const char *_dl_reltypes_tab[] = +static const char * const _dl_reltypes_tab[] = { [0] "R_SH_NONE", "R_SH_DIR32", "R_SH_REL32", "R_SH_DIR8WPN", [4] "R_SH_IND12W", "R_SH_DIR8WPL", "R_SH_DIR8WPZ", "R_SH_DIR8BP", @@ -36,6 +36,8 @@ static const char *_dl_reltypes_tab[] = [25] "R_SH_SWITCH16","R_SH_SWITCH32","R_SH_USES", [28] "R_SH_COUNT", "R_SH_ALIGN", "R_SH_CODE", "R_SH_DATA", [32] "R_SH_LABEL", "R_SH_SWITCH8", "R_SH_GNU_VTINHERIT","R_SH_GNU_VTENTRY", +[144] "R_SH_TLS_GD_32","R_SH_TLS_LD_32", "R_SH_TLS_LDO_32", "R_SH_TLS_IE_32", +[148] "R_SH_TLS_LE_32","R_SH_TLS_DTPMOD32", "R_SH_TLS_DTPOFF32", "R_SH_TLS_TPOFF32", [160] "R_SH_GOT32", "R_SH_PLT32", "R_SH_COPY", "R_SH_GLOB_DAT", [164] "R_SH_JMP_SLOT","R_SH_RELATIVE","R_SH_GOTOFF", "R_SH_GOTPC", }; diff --git a/ldso/ldso/sh/dl-startup.h b/ldso/ldso/sh/dl-startup.h index 7a3fdf235..b0b9b021d 100644 --- a/ldso/ldso/sh/dl-startup.h +++ b/ldso/ldso/sh/dl-startup.h @@ -6,6 +6,7 @@ __asm__( " .text\n" " .globl _start\n" " .type _start,@function\n" + " .hidden _start\n" "_start:\n" " mov r15, r4\n" " mov.l .L_dl_start, r0\n" @@ -16,12 +17,22 @@ __asm__( " mov.l .L_got, r12 ! Load the GOT on r12\n" " mova .L_got, r0\n" " add r0, r12\n" + " mov.l .L_dl_skip_args,r0\n" + " mov.l @(r0,r12),r0\n" + " mov.l @r0,r0\n" + " mov.l @r15,r5 ! Get the original argument count\n" + " sub r0,r5 ! Subtract _dl_skip_args from it\n" + " shll2 r0\n" + " add r0,r15 ! Adjust the stack pointer to skip _dl_skip_args words\n" + " mov.l r5,@r15 ! Store back the modified argument count\n" " mov.l .L_dl_fini, r0\n" " mov.l @(r0,r12), r4 ! Pass the finalizer in r4\n" " jmp @r8\n" " nop\n" ".L_dl_start:\n" " .long _dl_start-.jmp_loc\n" + ".L_dl_skip_args:\n" + " .long _dl_skip_args@GOT\n" ".L_dl_fini:\n" " .long _dl_fini@GOT\n" ".L_got:\n" @@ -32,7 +43,7 @@ __asm__( /* * Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = ((unsigned long*) ARGS) @@ -48,7 +59,7 @@ __asm__( * load address. */ #define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \ - switch(ELF32_R_TYPE((RELP)->r_info)){ \ + switch(ELF_R_TYPE((RELP)->r_info)){ \ case R_SH_REL32: \ *(REL) = (SYMBOL) + (RELP)->r_addend \ - (unsigned long)(REL); \ diff --git a/ldso/ldso/sh/dl-syscalls.h b/ldso/ldso/sh/dl-syscalls.h index d3672512f..b99a9b5a0 100644 --- a/ldso/ldso/sh/dl-syscalls.h +++ b/ldso/ldso/sh/dl-syscalls.h @@ -1,14 +1,7 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} - -#if __GNUC_PREREQ (4, 1) +#if __GNUC_PREREQ (4, 1) && !__GNUC_PREREQ (4, 9) #warning !!! 
gcc 4.1 and later have problems with __always_inline so redefined as inline # ifdef __always_inline # undef __always_inline -# define __always_inline inline +# define __always_inline __inline__ # endif #endif diff --git a/ldso/ldso/sh/dl-sysdep.h b/ldso/ldso/sh/dl-sysdep.h index fd0f236a2..21244ec1f 100644 --- a/ldso/ldso/sh/dl-sysdep.h +++ b/ldso/ldso/sh/dl-sysdep.h @@ -25,7 +25,7 @@ struct elf_resolve; extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry); -static __inline__ unsigned int +static __always_inline unsigned int _dl_urem(unsigned int n, unsigned int base) { int res; @@ -83,24 +83,21 @@ _dl_urem(unsigned int n, unsigned int base) #define do_rem(result, n, base) ((result) = _dl_urem((n), (base))) -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 - /* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or TLS variable, so undefined references should not be allowed to define the value. ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one of the main executable's symbols, as for a COPY reloc. */ -#define elf_machine_type_class(type) \ - ((((type) == R_SH_JMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ +# define elf_machine_type_class(type) \ + ((((type) == R_SH_JMP_SLOT || (type) == R_SH_TLS_DTPMOD32 \ + || (type) == R_SH_TLS_DTPOFF32 || (type) == R_SH_TLS_TPOFF32) \ + * ELF_RTYPE_CLASS_PLT) \ | (((type) == R_SH_COPY) * ELF_RTYPE_CLASS_COPY)) /* Return the link-time address of _DYNAMIC. Conveniently, this is the first element of the GOT. This must be inlined in a function which uses global data. */ -static __inline__ Elf32_Addr __attribute__ ((unused)) +static __always_inline Elf32_Addr __attribute__ ((unused)) elf_machine_dynamic (void) { register Elf32_Addr *got; @@ -109,7 +106,7 @@ elf_machine_dynamic (void) } /* Return the run-time load address of the shared object. 
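The new SuperH _start lines implement the usual adjustment for running the loader explicitly (e.g. invoking ld.so with the program as its first argument): _dl_skip_args says how many leading argv words to drop, and the stack is slid up so the target program still sees a normal argc/argv block. A C model of that adjustment on a mock initial stack (illustrative only; the real code must do this in assembly before the stack is otherwise used):

    /* Mock initial stack layout: [argc][argv0][argv1]...[NULL][envp...].
     * Returns the new stack pointer after dropping SKIP leading argv
     * entries, mirroring the added _start instructions. */
    static unsigned long *
    skip_args(unsigned long *sp, unsigned long skip)
    {
        unsigned long argc = sp[0];

        argc -= skip;   /* fewer arguments for the target program */
        sp += skip;     /* slide the stack pointer up by SKIP words */
        sp[0] = argc;   /* store the adjusted count where argc now lives */
        return sp;
    }

With skip = 1, the program's argv[0] becomes what was argv[1] on the original stack, which is what an explicitly invoked loader needs.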
*/ -static __inline__ Elf32_Addr __attribute__ ((unused)) +static __always_inline Elf32_Addr __attribute__ ((unused)) elf_machine_load_address (void) { Elf32_Addr addr; @@ -151,7 +148,7 @@ elf_machine_load_address (void) } \ } -static __inline__ void +static __always_inline void elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { diff --git a/ldso/ldso/sh/elfinterp.c b/ldso/ldso/sh/elfinterp.c index c34acdf95..e6ff6a37a 100644 --- a/ldso/ldso/sh/elfinterp.c +++ b/ldso/ldso/sh/elfinterp.c @@ -45,10 +45,9 @@ extern int _dl_linux_resolve(void); unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; int symtab_index; char *rel_addr; char *new_addr; @@ -59,25 +58,19 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); - reloc_type = ELF32_R_TYPE(this_reloc->r_info); - symtab_index = ELF32_R_SYM(this_reloc->r_info); + symtab_index = ELF_R_SYM(this_reloc->r_info); - symtab = (Elf32_Sym *)(intptr_t) tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *)(intptr_t) tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; - if (unlikely(reloc_type != R_SH_JMP_SLOT)) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", - _dl_progname); - _dl_exit(1); - } - /* Address of jump instruction to fix up */ instr_addr = (unsigned long) (this_reloc->r_offset + tpnt->loadaddr); got_addr = (char **) instr_addr; /* Get the address of the GOT entry */ - new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); + if (unlikely(!new_addr)) { _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); @@ -102,14 +95,14 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc) (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)) + int (*reloc_fnc) (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rpnt; int symtab_index; /* Now parse the relocation information */ @@ -117,13 +110,13 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, rpnt = (ELF_RELOC *)(intptr_t) rel_addr; rel_size = rel_size / sizeof(ELF_RELOC); - symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; for (i = 0; i < rel_size; i++, rpnt++) { int res; - symtab_index = ELF32_R_SYM(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); debug_sym(symtab,strtab,symtab_index); debug_reloc(symtab,strtab,rpnt); @@ -137,7 +130,7 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, _dl_dprintf(2, "symbol '%s': ", strtab + symtab[symtab_index].st_name); if (unlikely(res < 0)) { - int reloc_type = ELF32_R_TYPE(rpnt->r_info); + int reloc_type = ELF_R_TYPE(rpnt->r_info); #if 
defined (__SUPPORT_LD_DEBUG__) _dl_dprintf(2, "can't handle reloc type %s\n ", _dl_reltypes(reloc_type)); #else @@ -155,8 +148,8 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, static int -_dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; @@ -167,31 +160,57 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, unsigned long old_val; #endif +#if defined USE_TLS && USE_TLS + struct elf_resolve *tls_tpnt = NULL; +#endif + struct symbol_ref sym_ref; + reloc_addr = (unsigned long *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); - symtab_index = ELF32_R_SYM(rpnt->r_info); + reloc_type = ELF_R_TYPE(rpnt->r_info); + symtab_index = ELF_R_SYM(rpnt->r_info); symbol_addr = 0; - symname = strtab + symtab[symtab_index].st_name; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; if (symtab_index) { + symname = strtab + symtab[symtab_index].st_name; symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt, - elf_machine_type_class(reloc_type)); - + elf_machine_type_class(reloc_type), &sym_ref); /* * We want to allow undefined references to weak symbols - this might * have been intentional. We should not be linking local symbols * here, so all bases should be covered. */ - if (!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) { + + if (!symbol_addr + && (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS) + && (ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK)) { _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", - _dl_progname, strtab + symtab[symtab_index].st_name); - _dl_exit (1); + _dl_progname, symname); + + /* Let the caller to handle the error: it may be non fatal if called from dlopen */ + return 1; } + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } +#if defined USE_TLS && USE_TLS + tls_tpnt = sym_ref.tpnt; +#endif } #if defined (__SUPPORT_LD_DEBUG__) old_val = *reloc_addr; #endif + +#if defined USE_TLS && USE_TLS + /* In case of a TLS reloc, tls_tpnt NULL means we have an 'anonymous' + symbol. This is the case for a static tls variable, so the lookup + module is just that one is referencing the tls variable. 
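For the R_SH_TLS_* cases added just below, the stored values follow the generic TLS model: DTPMOD32 records which module's dynamic TLS block to use, DTPOFF32 the variable's offset inside that block, and TPOFF32 a fixed offset from the thread pointer that is only valid once the module's block has been placed in static TLS (hence the CHECK_STATIC_TLS call). A conceptual sketch of how those values are consumed at run time; the dtv[] array and names here are illustrative stand-ins, not uClibc's actual data structures:

    /* Conceptual use of the values the loader stores. */
    struct tls_ref {
        unsigned long modid;    /* filled in by R_SH_TLS_DTPMOD32 */
        unsigned long offset;   /* filled in by R_SH_TLS_DTPOFF32 */
    };

    /* Dynamic access: dtv[] maps a module id to that module's TLS block
     * for the current thread. */
    static void *
    dynamic_tls_addr(void **dtv, const struct tls_ref *ref)
    {
        return (char *)dtv[ref->modid] + ref->offset;
    }

    /* Static access: tpoff was computed above as l_tls_offset plus the
     * symbol value (plus addend). */
    static void *
    static_tls_addr(char *thread_pointer, unsigned long tpoff)
    {
        return thread_pointer + tpoff;
    }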
*/ + if (!tls_tpnt) + tls_tpnt = tpnt; +#endif switch (reloc_type) { case R_SH_NONE: break; @@ -218,12 +237,25 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, case R_SH_RELATIVE: *reloc_addr = (unsigned long) tpnt->loadaddr + rpnt->r_addend; break; +#if defined USE_TLS && USE_TLS + case R_SH_TLS_DTPMOD32: + *reloc_addr = tls_tpnt->l_tls_modid; + break; + case R_SH_TLS_DTPOFF32: + *reloc_addr = symbol_addr; + break; + case R_SH_TLS_TPOFF32: + CHECK_STATIC_TLS ((struct link_map *) tls_tpnt); + *reloc_addr = tls_tpnt->l_tls_offset + symbol_addr + rpnt->r_addend; + break; +#endif default: - return -1; /*call _dl_exit(1) */ + + return -1; } #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val, *reloc_addr, reloc_addr); + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif return 0; @@ -231,8 +263,8 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope, static int -_dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_lazy_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; unsigned long *reloc_addr; @@ -244,7 +276,7 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, (void)strtab; reloc_addr = (unsigned long *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset); - reloc_type = ELF32_R_TYPE(rpnt->r_info); + reloc_type = ELF_R_TYPE(rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) old_val = *reloc_addr; @@ -256,11 +288,11 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, *reloc_addr += (unsigned long) tpnt->loadaddr; break; default: - return -1; /*call _dl_exit(1) */ + return -1; } #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val, *reloc_addr, reloc_addr); + _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif return 0; @@ -273,7 +305,7 @@ void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, } int _dl_parse_relocation_information(struct dyn_elf *rpnt, - unsigned long rel_addr, unsigned long rel_size) + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/sh64/dl-debug.h b/ldso/ldso/sh64/dl-debug.h deleted file mode 100644 index 485aac7fa..000000000 --- a/ldso/ldso/sh64/dl-debug.h +++ /dev/null @@ -1,79 +0,0 @@ -/* vi: set sw=8 ts=8: */ -/* - * ldso/ldso/sh64/elfinterp.c - * - * SuperH (sh64) ELF shared library loader suppport - * - * Copyright (C) 2003, 2004, 2005 Paul Mundt <lethal@linux-sh.org> - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. The name of the above contributors may not be - * used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -static const char *_dl_reltypes_tab[] = { - /* SHcompact relocs */ - [0] = "R_SH_NONE", "R_SH_DIR32", - "R_SH_REL32", "R_SH_DIR8WPN", - [4] = "R_SH_IND12W", "R_SH_DIR8WPL", - "R_SH_DIR8WPZ", "R_SH_DIR8BP", - [8] = "R_SH_DIR8W", "R_SH_DIR8L", - [25] = "R_SH_SWITCH16", "R_SH_SWITCH32", - "R_SH_USES", "R_SH_COUNT", - [29] = "R_SH_ALIGN", "R_SH_CODE", - "R_SH_DATA", "R_SH_LABEL", - [33] = "R_SH_SWITCH8", "R_SH_GNU_VTINHERIT", - "R_SH_GNU_VTENTRY", - [160] = "R_SH_GOT32", "R_SH_PLT32", - "R_SH_COPY", "R_SH_GLOB_DAT", - [164] = "R_SH_JMP_SLOT", "R_SH_RELATIVE", - "R_SH_GOTOFF", "R_SH_GOTPC", - - /* SHmedia relocs */ - [45] = "R_SH_DIR5U", "R_SH_DIR6U", - "R_SH_DIR6S", "R_SH_DIR10S", - [49] = "R_SH_DIR10SW", "R_SH_DIR10SL", - "R_SH_DIR10SQ", - [169] = "R_SH_GOT_LOW16", "R_SH_GOT_MEDLOW16", - "R_SH_GOT_MEDHI16", "R_SH_GOT_HI16", - [173] = "R_SH_GOTPLT_LOW16", "R_SH_GOTPLT_MEDLOW16", - "R_SH_GOTPLT_MEDHI16", "R_SH_GOTPLT_HI16", - [177] = "R_SH_PLT_LOW16", "R_SH_PLT_MEDLOW16", - "R_SH_PLT_MEDHI16", "R_SH_PLT_HI16", - [181] = "R_SH_GOTOFF_LOW16", "R_SH_GOTOFF_MEDLOW16", - "R_SH_GOTOFF_MEDHI16", "R_SH_GOTOFF_HI16", - [185] = "R_SH_GOTPC_LOW16", "R_SH_GOTPC_MEDLOW16", - "R_SH_GOTPC_MEDHI16", "R_SH_GOTPC_HI16", - [189] = "R_SH_GOT10BY4", "R_SH_GOTPLT10BY4", - "R_SH_GOT10BY8", "R_SH_GOTPLT10BY8", - [193] = "R_SH_COPY64", "R_SH_GLOB_DAT64", - "R_SH_JMP_SLOT64", "R_SH_RELATIVE64", - [197] = "R_SH_RELATIVE_LOW16", "R_SH_RELATIVE_MEDLOW16", - "R_SH_RELATIVE_MEDHI16","R_SH_RELATIVE_HI16", - [242] = "R_SH_SHMEDIA_CODE", "R_SH_PT_16", - "R_SH_IMMS16", "R_SH_IMMU16", - [246] = "R_SH_IMM_LOW16", "R_SH_IMM_LOW16_PCREL", - "R_SH_IMM_MEDLOW16", "R_SH_IMM_MEDLOW16_PCREL", - [250] = "R_SH_IMM_MEDHI16", "R_SH_IMM_MEDHI16_PCREL", - "R_SH_IMM_HI16", "R_SH_IMM_HI16_PCREL", - [254] = "R_SH_64", "R_SH_64_PCREL", -}; diff --git a/ldso/ldso/sh64/dl-startup.h b/ldso/ldso/sh64/dl-startup.h deleted file mode 100644 index 33512a9bc..000000000 --- a/ldso/ldso/sh64/dl-startup.h +++ /dev/null @@ -1,117 +0,0 @@ -/* Any assembly language/system dependent hacks needed to setup boot1.c so it - * will work as expected and cope with whatever platform specific wierdness is - * needed for this architecture. - */ - -__asm__("" \ -" .section .text..SHmedia32,\"ax\"\n" \ -" .globl _start\n" \ -" .type _start, @function\n" \ -" .align 5\n" \ -"_start:\n" \ -" ! Set r12 to point to GOT\n" \ -" movi (((datalabel _GLOBAL_OFFSET_TABLE_-(.LZZZ3-.)) >> 16) & 0xffff), r12\n" \ -" shori ((datalabel _GLOBAL_OFFSET_TABLE_-(.LZZZ3-.)) & 0xffff), r12\n" \ -".LZZZ3:\n" \ -" ptrel/u r12, tr0\n" \ -" gettr tr0, r12 ! GOT address\n" \ -" add r18, r63, r11 ! save return address - needed?\n" \ -" add r15, r63, r2 ! arg = stack pointer\n" \ -" pt _dl_start, tr0 ! 
should work even if PIC\n" \ -" blink tr0, r18 ! call _dl_start - user EP is in r2\n" \ -" add r2, r63, r28\n" \ -" movi (((_dl_fini@GOT) >> 16) & 0xffff), r1\n" \ -" shori ((_dl_fini@GOT) & 0xffff), r1\n" \ -" ldx.l r1, r12, r2\n" \ -" add r11, r63, r18\n" \ -" ptabs/l r28, tr0\n" \ -" blink tr0, r63\n" \ -" .size _start,.-_start\n" -" .previous\n" -); - -/* - * Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to - * do something a little more subtle here. - */ -#define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long *)ARGS)+1) - -/* - * Here is a macro to perform a relocation. This is only used when - * bootstrapping the dynamic loader. RELP is the relocation that we - * are performing, REL is the pointer to the address we are relocating. - * SYMBOL is the symbol involved in the relocation, and LOAD is the - * load address. - */ - -#include <elf.h> - -#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \ - const unsigned int r_type = ELF32_R_TYPE((RELP)->r_info); \ - int lsb = !!((SYMTAB)->st_other & STO_SH5_ISA32); \ - \ - switch (r_type) { \ - case R_SH_REL32: \ - *(REL) = (SYMBOL) + (RELP)->r_addend \ - - (unsigned long)(REL); \ - break; \ - case R_SH_DIR32: \ - case R_SH_GLOB_DAT: \ - case R_SH_JMP_SLOT: \ - *(REL) = ((SYMBOL) + (RELP)->r_addend) | lsb; \ - break; \ - case R_SH_RELATIVE: \ - *(REL) = (LOAD) + (RELP)->r_addend; \ - break; \ - case R_SH_RELATIVE_LOW16: \ - case R_SH_RELATIVE_MEDLOW16: \ - { \ - unsigned long word, value; \ - \ - word = (unsigned long)(REL) & ~0x3fffc00; \ - value = (LOAD) + (RELP)->r_addend; \ - \ - if (r_type == R_SH_RELATIVE_MEDLOW16) \ - value >>= 16; \ - \ - word |= (value & 0xffff) << 10; \ - *(REL) = word; \ - break; \ - } \ - case R_SH_IMM_LOW16: \ - case R_SH_IMM_MEDLOW16: \ - { \ - unsigned long word, value; \ - \ - word = (unsigned long)(REL) & ~0x3fffc00; \ - value = ((SYMBOL) + (RELP)->r_addend) | lsb; \ - \ - if (r_type == R_SH_IMM_MEDLOW16) \ - value >>= 16; \ - \ - word |= (value & 0xffff) << 10; \ - *(REL) = word; \ - break; \ - } \ - case R_SH_IMM_LOW16_PCREL: \ - case R_SH_IMM_MEDLOW16_PCREL: \ - { \ - unsigned long word, value; \ - \ - word = (unsigned long)(REL) & ~0x3fffc00; \ - value = (SYMBOL) + (RELP)->r_addend \ - - (unsigned long)(REL); \ - \ - if (r_type == R_SH_IMM_MEDLOW16_PCREL) \ - value >>= 16; \ - \ - word |= (value & 0xffff) << 10; \ - *(REL) = word; \ - break; \ - } \ - case R_SH_NONE: \ - break; \ - default: \ - _dl_exit(1); \ - } diff --git a/ldso/ldso/sh64/dl-syscalls.h b/ldso/ldso/sh64/dl-syscalls.h deleted file mode 100644 index 4fe50fac4..000000000 --- a/ldso/ldso/sh64/dl-syscalls.h +++ /dev/null @@ -1,25 +0,0 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. 
*/ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} - -#undef __syscall_return -#define __syscall_return(type, res) \ -do { \ - /* \ - * Note: when returning from kernel the return value is in r9 \ - * \ - * This prevents conflicts between return value and arg1 \ - * when dispatching signal handler, in other words makes \ - * life easier in the system call epilogue (see entry.S) \ - */ \ - register unsigned long __sr2 __asm__ ("r2") = res; \ - if ((unsigned long)(res) >= (unsigned long)(-125)) { \ - _dl_errno = -(res); \ - __sr2 = -1; \ - } \ - return (type)(__sr2); \ -} while (0) - diff --git a/ldso/ldso/sh64/dl-sysdep.h b/ldso/ldso/sh64/dl-sysdep.h deleted file mode 100644 index 21bfffcb6..000000000 --- a/ldso/ldso/sh64/dl-sysdep.h +++ /dev/null @@ -1,169 +0,0 @@ -/* vi: set sw=8 ts=8: */ -/* - * Various assembly language/system dependent hacks that are required - * so that we can minimize the amount of platform specific code. - */ - -/* Define this if the system uses RELOCA. */ -#define ELF_USES_RELOCA -#include <elf.h> -/* - * Initialization sequence for a GOT. - */ -#define INIT_GOT(GOT_BASE,MODULE) \ -{ \ - GOT_BASE[2] = (unsigned long)_dl_linux_resolve; \ - GOT_BASE[1] = (unsigned long)(MODULE); \ -} - -/* Here we define the magic numbers that this dynamic loader should accept */ -#define MAGIC1 EM_SH -#undef MAGIC2 -/* Used for error messages */ -#define ELF_TARGET "sh64" - -struct elf_resolve; -extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry); - -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 - -/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or - TLS variable, so undefined references should not be allowed to - define the value. - ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one - of the main executable's symbols, as for a COPY reloc. */ -#define elf_machine_type_class(type) \ - ((((type) == R_SH_JMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ - | (((type) == R_SH_COPY) * ELF_RTYPE_CLASS_COPY)) - -/* Return the link-time address of _DYNAMIC. Conveniently, this is the - first element of the GOT. This must be inlined in a function which - uses global data. */ -static __inline__ Elf32_Addr elf_machine_dynamic(void) -{ - register Elf32_Addr *got; - - /* - * The toolchain adds 32768 to the GOT address, we compensate for - * that in the movi/sub pair. - * - * XXX: If this is cleaned up in the toolchain, we can end up - * saving 2 instructions and subsequently free up r1 from the - * clobber list.. - */ - __asm__ ( - "movi\t(((datalabel _GLOBAL_OFFSET_TABLE_-(.LZZZ1-.)) >> 16) & 0xffff), r2\n\t" - "shori\t((datalabel _GLOBAL_OFFSET_TABLE_-(.LZZZ1-.)) & 0xffff), r2\n\t" - ".LZZZ1:\tptrel/u r2, tr0\n\t" - "movi\t32768, r1\n\t" - "gettr\ttr0, r2\n\t" - "sub\tr2, r1, %0\n\t" - : "=r" (got) - : /* no inputs */ - : "r1", "r2", "tr0" - ); - - return *got; -} - -/* Return the run-time load address of the shared object. 
*/ -static __inline__ Elf32_Addr elf_machine_load_address(void) -{ - Elf32_Addr addr; - - __asm__ ( - "movi\t(((datalabel _GLOBAL_OFFSET_TABLE_-(.LZZZ2-.)) >> 16) & 0xffff), r0\n\t" - "shori\t((datalabel _GLOBAL_OFFSET_TABLE_-(.LZZZ2-.)) & 0xffff), r0\n\t" - ".LZZZ2:\tptrel/u r0, tr0\n\t" - "movi\t(((_dl_start@GOTOFF) >> 16) & 0xffff), r2\n\t" - "shori\t((_dl_start@GOTOFF) & 0xffff), r2\n\t" - "gettr\ttr0, r0\n\t" - "add\tr2, r0, r2\n\t" - "movi\t(((_dl_start@GOT) >> 16) & 0xffff), r1\n\t" - "shori\t((_dl_start@GOT) & 0xffff), r1\n\t" - "ldx.l\tr1, r0, r1\n\t" - "sub\tr2, r1, %0\n\t" - : "=r" (addr) - : /* no inputs */ - : "r0", "r1", "r2", "tr0" - ); - - return addr; -} - -/* - * XXX: As we don't need to worry about r25 clobbering, we could probably - * get away with inlining {st,ld}{x,}.l and friends here instead and - * forego gcc's idea of code generation. - */ -#define COPY_UNALIGNED_WORD(swp, twp, align) \ -{ \ - void *__s = (swp), *__t = (twp); \ - unsigned char *__s1 = __s, *__t1 = __t; \ - unsigned short *__s2 = __s, *__t2 = __t; \ - unsigned long *__s4 = __s, *__t4 = __t; \ - \ - switch ((align)) { \ - case 0: \ - *__t4 = *__s4; \ - break; \ - case 2: \ - *__t2++ = *__s2++; \ - *__t2 = *__s2; \ - break; \ - default: \ - *__t1++ = *__s1++; \ - *__t1++ = *__s1++; \ - *__t1++ = *__s1++; \ - *__t1 = *__s1; \ - break; \ - } \ -} - -static __inline__ void -elf_machine_relative(Elf32_Addr load_off, const Elf32_Addr rel_addr, - Elf32_Word relative_count) -{ - Elf32_Addr value, word; - Elf32_Rela *rpnt = (void *)rel_addr; - int reloc_type = ELF32_R_TYPE(rpnt->r_info); - - do { - Elf32_Addr *const reloc_addr = - (void *)(load_off + rpnt->r_offset); - int align = (int)reloc_addr & 3; - - switch (reloc_type) { - case R_SH_RELATIVE_LOW16: - COPY_UNALIGNED_WORD(reloc_addr, &word, align); - word &= ~0x3fffc00; - value = (rpnt->r_addend + load_off); - word |= (value & 0xffff) << 10; - COPY_UNALIGNED_WORD(&word, reloc_addr, align); - break; - case R_SH_RELATIVE_MEDLOW16: - COPY_UNALIGNED_WORD(reloc_addr, &word, align); - word &= ~0x3fffc00; - value = (rpnt->r_addend + load_off) >> 16; - word |= (value & 0xffff) << 10; - COPY_UNALIGNED_WORD(&word, reloc_addr, align); - break; - default: - if (rpnt->r_addend) { - value = load_off + rpnt->r_addend; - } else { - COPY_UNALIGNED_WORD(reloc_addr, &value, align); - value += load_off; - } - - COPY_UNALIGNED_WORD(&value, reloc_addr, align); - break; - } - - rpnt++; - } while (--relative_count); -#undef COPY_UNALIGNED_WORD -} diff --git a/ldso/ldso/sh64/elfinterp.c b/ldso/ldso/sh64/elfinterp.c deleted file mode 100644 index 845ff4fd0..000000000 --- a/ldso/ldso/sh64/elfinterp.c +++ /dev/null @@ -1,346 +0,0 @@ -/* vi: set sw=8 ts=8: */ -/* - * ldso/ldso/sh64/elfinterp.c - * - * SuperH (sh64) ELF shared library loader suppport - * - * Copyright (C) 2003, 2004, 2005 Paul Mundt <lethal@linux-sh.org> - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. The name of the above contributors may not be - * used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* Program to load an ELF binary on a linux system, and run it. - References to symbols in sharable libraries can be resolved by either - an ELF sharable library or a linux style of shared library. */ - -/* Disclaimer: I have never seen any AT&T source code for SVr4, nor have - I ever taken any courses on internals. This program was developed using - information available through the book "UNIX SYSTEM V RELEASE 4, - Programmers guide: Ansi C and Programming Support Tools", which did - a more than adequate job of explaining everything required to get this - working. */ - -#include "ldso.h" - -extern int _dl_linux_resolve(void); - -unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) -{ - int reloc_type; - ELF_RELOC *this_reloc; - char *strtab; - Elf32_Sym *symtab; - int symtab_index; - char *rel_addr; - char *new_addr; - char **got_addr; - unsigned long instr_addr; - char *symname; - - rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; - - this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); - reloc_type = ELF32_R_TYPE(this_reloc->r_info); - symtab_index = ELF32_R_SYM(this_reloc->r_info); - - symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; - strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; - symname = strtab + symtab[symtab_index].st_name; - - if (unlikely(reloc_type != R_SH_JMP_SLOT)) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump reloc\n", - _dl_progname); - _dl_exit(1); - } - - /* Address of jump instruction to fix up */ - instr_addr = ((unsigned long)this_reloc->r_offset + - (unsigned long)tpnt->loadaddr); - got_addr = (char **)instr_addr; - - - /* Get the address of the GOT entry */ - new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); - if (unlikely(!new_addr)) { - _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", - _dl_progname, symname); - _dl_exit(1); - } - -#ifdef __SUPPORT_LD_DEBUG__ - if ((unsigned long)got_addr < 0x20000000) { - if (_dl_debug_bindings) { - _dl_dprintf(_dl_debug_file, "\nresolve function: %s", - symname); - - if (_dl_debug_detail) - _dl_dprintf(_dl_debug_file, - "\n\tpatched %x ==> %x @ %x\n", - *got_addr, new_addr, got_addr); - } - } - - if (!_dl_debug_nofixups) - *got_addr = new_addr; -#else - *got_addr = new_addr; -#endif - - return (unsigned long)new_addr; -} - -static int _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, - unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc)(struct elf_resolve *tpnt, - struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, - char *strtab)) -{ - unsigned int i; - char *strtab; - Elf32_Sym *symtab; - ELF_RELOC *rpnt; - int symtab_index; - - /* Now parse the relocation information */ - rpnt = (ELF_RELOC *)(intptr_t)rel_addr; - rel_size = rel_size / 
sizeof(ELF_RELOC); - - symtab = (Elf32_Sym *)(intptr_t)tpnt->dynamic_info[DT_SYMTAB]; - strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; - - for (i = 0; i < rel_size; i++, rpnt++) { - int res; - - symtab_index = ELF32_R_SYM(rpnt->r_info); - debug_sym(symtab,strtab,symtab_index); - debug_reloc(symtab,strtab,rpnt); - - res = reloc_fnc (tpnt, scope, rpnt, symtab, strtab); - if (res == 0) - continue; - - _dl_dprintf(2, "\n%s: ",_dl_progname); - - if (symtab_index) - _dl_dprintf(2, "symbol '%s': ", - strtab + symtab[symtab_index].st_name); - - if (unlikely(res < 0)) { - int reloc_type = ELF32_R_TYPE(rpnt->r_info); - - _dl_dprintf(2, "can't handle reloc type " -#ifdef __SUPPORT_LD_DEBUG__ - "%s\n", _dl_reltypes(reloc_type) -#else - "%x\n", reloc_type -#endif - ); - - _dl_exit(-res); - } - if (unlikely(res > 0)) { - _dl_dprintf(2, "can't resolve symbol\n"); - - return res; - } - } - - return 0; -} - -static int _dl_do_reloc(struct elf_resolve *tpnt,struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) -{ - int reloc_type; - int symtab_index, lsb; - char *symname; - unsigned long *reloc_addr; - unsigned long symbol_addr; -#ifdef __SUPPORT_LD_DEBUG__ - unsigned long old_val; -#endif - - reloc_type = ELF32_R_TYPE(rpnt->r_info); - symtab_index = ELF32_R_SYM(rpnt->r_info); - symbol_addr = 0; - lsb = !!(symtab[symtab_index].st_other & STO_SH5_ISA32); - symname = strtab + symtab[symtab_index].st_name; - reloc_addr = (unsigned long *)(intptr_t) - (tpnt->loadaddr + (unsigned long)rpnt->r_offset); - - if (symtab_index) { - int stb; - - symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt, - elf_machine_type_class(reloc_type)); - - /* - * We want to allow undefined references to weak symbols - this - * might have been intentional. We should not be linking local - * symbols here, so all bases should be covered. 
- */ - stb = ELF32_ST_BIND(symtab[symtab_index].st_info); - - if (stb != STB_WEAK && !symbol_addr) { - _dl_dprintf (2, "%s: can't resolve symbol '%s'\n", - _dl_progname, strtab + symtab[symtab_index].st_name); - _dl_exit (1); - } - } - -#ifdef __SUPPORT_LD_DEBUG__ - old_val = *reloc_addr; -#endif - - switch (reloc_type) { - case R_SH_NONE: - break; - case R_SH_COPY: - _dl_memcpy((char *)reloc_addr, - (char *)symbol_addr, symtab[symtab_index].st_size); - break; - case R_SH_DIR32: - case R_SH_GLOB_DAT: - case R_SH_JMP_SLOT: - *reloc_addr = (symbol_addr + rpnt->r_addend) | lsb; - break; - case R_SH_REL32: - *reloc_addr = symbol_addr + rpnt->r_addend - - (unsigned long)reloc_addr; - break; - case R_SH_RELATIVE: - *reloc_addr = (unsigned long)tpnt->loadaddr + rpnt->r_addend; - break; - case R_SH_RELATIVE_LOW16: - case R_SH_RELATIVE_MEDLOW16: - { - unsigned long word, value; - - word = (unsigned long)reloc_addr & ~0x3fffc00; - value = (unsigned long)tpnt->loadaddr + rpnt->r_addend; - - if (reloc_type == R_SH_RELATIVE_MEDLOW16) - value >>= 16; - - word |= (value & 0xffff) << 10; - *reloc_addr = word; - - break; - } - case R_SH_IMM_LOW16: - case R_SH_IMM_MEDLOW16: - { - unsigned long word, value; - - word = (unsigned long)reloc_addr & ~0x3fffc00; - value = (symbol_addr + rpnt->r_addend) | lsb; - - if (reloc_type == R_SH_IMM_MEDLOW16) - value >>= 16; - - word |= (value & 0xffff) << 10; - *reloc_addr = word; - - break; - } - case R_SH_IMM_LOW16_PCREL: - case R_SH_IMM_MEDLOW16_PCREL: - { - unsigned long word, value; - - word = (unsigned long)reloc_addr & ~0x3fffc00; - value = symbol_addr + rpnt->r_addend - - (unsigned long)reloc_addr; - - if (reloc_type == R_SH_IMM_MEDLOW16_PCREL) - value >>= 16; - - word |= (value & 0xffff) << 10; - *reloc_addr = word; - - break; - } - default: - return -1; /*call _dl_exit(1) */ - } - -#ifdef __SUPPORT_LD_DEBUG__ - if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", - old_val, *reloc_addr, reloc_addr); -#endif - - return 0; -} - -static int _dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) -{ - int reloc_type, symtab_index, lsb; - unsigned long *reloc_addr; -#ifdef __SUPPORT_LD_DEBUG__ - unsigned long old_val; -#endif - - reloc_type = ELF32_R_TYPE(rpnt->r_info); - symtab_index = ELF32_R_SYM(rpnt->r_info); - lsb = !!(symtab[symtab_index].st_other & STO_SH5_ISA32); - reloc_addr = (unsigned long *)(intptr_t) - (tpnt->loadaddr + (unsigned long)rpnt->r_offset); - -#ifdef __SUPPORT_LD_DEBUG__ - old_val = *reloc_addr; -#endif - - switch (reloc_type) { - case R_SH_NONE: - break; - case R_SH_JMP_SLOT: - *reloc_addr += (unsigned long)tpnt->loadaddr | lsb; - break; - default: - return -1; /*call _dl_exit(1) */ - } - -#ifdef __SUPPORT_LD_DEBUG__ - if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", - old_val, *reloc_addr, reloc_addr); -#endif - - return 0; -} - -void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, - unsigned long rel_addr, unsigned long rel_size) -{ - (void)_dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc); -} - -int _dl_parse_relocation_information(struct dyn_elf *rpnt, - unsigned long rel_addr, unsigned long rel_size) -{ - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); -} diff --git a/ldso/ldso/sh64/resolve.S b/ldso/ldso/sh64/resolve.S deleted file mode 100644 index ca915d2ef..000000000 --- a/ldso/ldso/sh64/resolve.S +++ 
/dev/null @@ -1,95 +0,0 @@ -/* vi: set sw=8 ts=8: */ -/* - * ldso/ldso/sh64/resolve.S - * - * SuperH (sh64) dynamic resolver support - * - * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. The name of the above contributors may not be - * used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - - .section .text..SHmedia32,"ax" - .globl _dl_linux_resolver - .globl _dl_linux_resolve - .type _dl_linux_resolve, @function - - .balign 16 -_dl_linux_resolve: - addi r15, -72, r15 ! make room on the stack - pt _dl_linux_resolver, tr0 - st.q r15, 0, r2 ! save regs - st.q r15, 8, r3 - st.q r15, 16, r4 - st.q r15, 24, r5 - st.q r15, 32, r6 - st.q r15, 40, r7 - st.q r15, 48, r8 - st.q r15, 56, r9 - st.q r15, 64, r18 - -#ifdef HAVE_FPU - addi r15, -48, r15 ! make room for FP regs - fst.d r15, 0, dr0 ! save FP regs - fst.d r15, 8, dr2 - fst.d r15, 16, dr4 - fst.d r15, 24, dr6 - fst.d r15, 32, dr8 - fst.d r15, 40, dr10 -#endif - - /* - * Args for _dl_linux_resolver(), set in r17/r21 by PLT code - */ - - add r17, r63, r2 ! link map address - add r21, r63, r3 ! GOT offset - blink tr0, r18 ! call _dl_linux_resolver() - ptabs/l r2, tr0 ! save result = addr of function called - -#ifdef HAVE_FPU - fld.d r15, 0, dr0 ! restore FP regs - fld.d r15, 8, dr2 - fld.d r15, 16, dr4 - fld.d r15, 24, dr6 - fld.d r15, 32, dr8 - fld.d r15, 40, dr10 - addi r15, 48, r15 -#endif - - ld.q r15, 0, r2 ! restore regs - ld.q r15, 8, r3 - ld.q r15, 16, r4 - ld.q r15, 24, r5 - ld.q r15, 32, r6 - ld.q r15, 40, r7 - ld.q r15, 48, r8 - ld.q r15, 56, r9 - ld.q r15, 64, r18 - - addi r15, 72, r15 - blink tr0, r63 ! jump to function address - - .size _dl_linux_resolve, . - _dl_linux_resolve - diff --git a/ldso/ldso/sparc/dl-debug.h b/ldso/ldso/sparc/dl-debug.h index 1249f7798..5c62cefad 100644 --- a/ldso/ldso/sparc/dl-debug.h +++ b/ldso/ldso/sparc/dl-debug.h @@ -28,7 +28,7 @@ * SUCH DAMAGE. 
*/ -static const char * _dl_reltypes_tab[] = { +static const char * const _dl_reltypes_tab[] = { "R_SPARC_NONE", "R_SPARC_8", "R_SPARC_16", "R_SPARC_32", "R_SPARC_DISP8", "R_SPARC_DISP16", "R_SPARC_DISP32", "R_SPARC_WDISP30", "R_SPARC_WDISP22", diff --git a/ldso/ldso/sparc/dl-startup.h b/ldso/ldso/sparc/dl-startup.h index cc11ec103..8c8c2c4f3 100644 --- a/ldso/ldso/sparc/dl-startup.h +++ b/ldso/ldso/sparc/dl-startup.h @@ -8,6 +8,7 @@ __asm__ ("\ .text\n\ .global _start\n\ .type _start,%function\n\ + .hidden _start\n\ .align 32\n\ .register %g2, #scratch\n\ _start:\n\ @@ -47,7 +48,7 @@ _dl_start_user:\n\ /* * Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. We assume that argc is stored * at the word just below the argvp that we return here. */ diff --git a/ldso/ldso/sparc/dl-syscalls.h b/ldso/ldso/sparc/dl-syscalls.h index 996bb87c6..f40c4fd31 100644 --- a/ldso/ldso/sparc/dl-syscalls.h +++ b/ldso/ldso/sparc/dl-syscalls.h @@ -1,6 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/sparc/dl-sysdep.h b/ldso/ldso/sparc/dl-sysdep.h index 27cb97e38..d35a39147 100644 --- a/ldso/ldso/sparc/dl-sysdep.h +++ b/ldso/ldso/sparc/dl-sysdep.h @@ -29,7 +29,7 @@ /* Here we define the magic numbers that this dynamic loader should accept * Note that SPARCV9 doesn't use EM_SPARCV9 since the userland is still 32-bit. */ -#if defined(__sparc_v9__) || defined(__sparc_v8__) +#if defined(__sparc_v9__) #define MAGIC1 EM_SPARC32PLUS #else #define MAGIC1 EM_SPARC @@ -40,6 +40,9 @@ /* Used for error messages */ #define ELF_TARGET "sparc" +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + struct elf_resolve; unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry); @@ -49,7 +52,7 @@ unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry); #ifndef COMPILE_ASM /* Cheap modulo implementation, taken from arm/ld_sysdep.h. */ -static __inline__ unsigned long +static __always_inline unsigned long sparc_mod(unsigned long m, unsigned long p) { unsigned long i, t, inc; @@ -89,24 +92,14 @@ sparc_mod(unsigned long m, unsigned long p) #define do_rem(result, n, base) ((result) = sparc_mod(n, base)) #endif -/* 4096 bytes alignment */ -#if defined(__sparc_v9__) -/* ...but 8192 is required for mmap() on sparc64 kernel */ -#define PAGE_ALIGN 0xffffe000 -#define ADDR_ALIGN 0x1fff -#define OFFS_ALIGN 0x7fffe000 -#elif defined(__sparc_v8__) -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 -#endif - /* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so PLT entries should not be allowed to define the value. ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one of the main executable's symbols, as for a COPY reloc. */ #define elf_machine_type_class(type) \ - ((((type) == R_SPARC_JMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ + ((((type) == R_SPARC_JMP_SLOT || (type) == R_SPARC_TLS_DTPMOD32 \ + || (type) == R_SPARC_TLS_DTPOFF32 || (type) == R_SPARC_TLS_TPOFF32) \ + * ELF_RTYPE_CLASS_PLT) \ | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY)) /* The SPARC overlaps DT_RELA and DT_PLTREL. 
*/ @@ -127,7 +120,7 @@ do { register Elf32_Addr pc __asm__("o7"); \ /* Return the link-time address of _DYNAMIC. Conveniently, this is the first element of the GOT. This must be inlined in a function which uses global data. */ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_dynamic (void) { register Elf32_Addr *got __asm__ ("%l7"); @@ -138,7 +131,7 @@ elf_machine_dynamic (void) } /* Return the run-time load address of the shared object. */ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_load_address (void) { register Elf32_Addr *pc __asm__ ("%o7"), *got __asm__ ("%l7"); @@ -157,7 +150,7 @@ elf_machine_load_address (void) return (Elf32_Addr) got - *got + (pc[2] - pc[3]) * 4 - 4; } -static __inline__ void +static __always_inline void elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { diff --git a/ldso/ldso/sparc/elfinterp.c b/ldso/ldso/sparc/elfinterp.c index 9b425fcee..bb61be9eb 100644 --- a/ldso/ldso/sparc/elfinterp.c +++ b/ldso/ldso/sparc/elfinterp.c @@ -52,7 +52,6 @@ extern int _dl_linux_resolve(void); unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; ElfW(Sym) *symtab; @@ -70,25 +69,18 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) reloc_entry = (reloc_entry >> 10) - 0xc; this_reloc = (ELF_RELOC *)(rel_addr + reloc_entry); - reloc_type = ELF_R_TYPE(this_reloc->r_info); symtab_index = ELF_R_SYM(this_reloc->r_info); symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; - if (unlikely(reloc_type != R_SPARC_JMP_SLOT)) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", - _dl_progname); - _dl_exit(1); - } - /* Address of the jump instruction to fix up. 
*/ instr_addr = (this_reloc->r_offset + tpnt->loadaddr); got_addr = (char **)instr_addr; /* Get the address of the GOT entry */ - new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); if (unlikely(!new_addr)) { _dl_dprintf(2, "%s: Can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); @@ -107,17 +99,17 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) if (!_dl_debug_nofixups) #endif { - got_addr[1] = (char *) (0x03000000 | (((unsigned int) new_addr >> 10) & 0x3fffff)); - got_addr[2] = (char *) (0x81c06000 | ((unsigned int) new_addr & 0x3ff)); + got_addr[1] = (char *) (OPCODE_SETHI_G1 | (((unsigned int) new_addr >> 10) & 0x3fffff)); + got_addr[2] = (char *) (OPCODE_JMP_G1 | ((unsigned int) new_addr & 0x3ff)); } return (unsigned long)new_addr; } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc)(struct elf_resolve *tpnt, struct dyn_elf *scope, + int (*reloc_fnc)(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; @@ -172,13 +164,14 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; char *symname; - ElfW(Sym) *sym; + struct elf_resolve *tls_tpnt = NULL; + struct symbol_ref sym_ref; ElfW(Addr) *reloc_addr; ElfW(Addr) symbol_addr; #if defined (__SUPPORT_LD_DEBUG__) @@ -188,22 +181,36 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); reloc_type = ELF_R_TYPE(rpnt->r_info); symtab_index = ELF_R_SYM(rpnt->r_info); - sym = &symtab[symtab_index]; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symbol_addr = 0; - symname = strtab + sym->st_name; + symname = strtab + sym_ref.sym->st_name; if (symtab_index) { symbol_addr = (ElfW(Addr))_dl_find_hash(symname, scope, tpnt, - elf_machine_type_class(reloc_type)); + elf_machine_type_class(reloc_type), &sym_ref); /* * We want to allow undefined references to weak symbols - this * might have been intentional. We should not be linking local * symbols here, so all bases should be covered. */ - if (unlikely(!symbol_addr && ELF_ST_BIND(sym->st_info) != STB_WEAK)) { - _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); - _dl_exit(1); + if (unlikely(!symbol_addr && (ELF_ST_TYPE(sym_ref.sym->st_info) != STT_TLS) + && (ELF_ST_BIND(sym_ref.sym->st_info) != STB_WEAK))) { + /* This may be non-fatal if called from dlopen. */ + return 1; + } + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } + tls_tpnt = sym_ref.tpnt; + } else { + /* Relocs against STN_UNDEF are usually treated as using a + * symbol value of zero, and using the module containing the + * reloc itself. 
*/ + symbol_addr = sym_ref.sym->st_value; + tls_tpnt = tpnt; } #if defined (__SUPPORT_LD_DEBUG__) @@ -216,21 +223,6 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, case R_SPARC_NONE: break; -#if 0 /* these dont really seem to be useful */ - case R_SPARC_8: - *(char *) reloc_addr = symbol_addr; - break; - case R_SPARC_16: - *(short *) reloc_addr = symbol_addr; - break; - case R_SPARC_DISP8: - *(char *) reloc_addr = (symbol_addr) - (Elf32_Addr) reloc_addr; - break; - case R_SPARC_DISP16: - *(short *) reloc_addr = (symbol_addr) - (Elf32_Addr) reloc_addr; - break; -#endif - case R_SPARC_DISP32: *reloc_addr = symbol_addr - (unsigned int) reloc_addr; break; @@ -240,7 +232,7 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, symbol_addr = tpnt->loadaddr + rpnt->r_addend; else symbol_addr += rpnt->r_addend; - *reloc_addr = (*reloc_addr & ~0x3ff)|(symbol_addr & 0x3ff); + *reloc_addr = (*reloc_addr & ~0x3ff) | (symbol_addr & 0x3ff); break; case R_SPARC_GLOB_DAT: @@ -249,17 +241,8 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, break; case R_SPARC_JMP_SLOT: -/* -value = symbol_addr; -value += reloc->r_addend; -disp = value - reloc_addr; -reloc_addr[1] = OPCODE_JMP_G1 | (value & 0x3ff); -reloc_addr[0] = OPCODE_SETHI_G1 | (value >> 10); - reloc_addr[1] = OPCODE_JMP_G1 | ((symbol_addr-(Elf32_Addr)reloc_addr) & 0x3ff); - reloc_addr[0] = OPCODE_SETHI_G1 | ((symbol_addr-(Elf32_Addr)reloc_addr) >> 10); -*/ - reloc_addr[1] = 0x03000000 | ((symbol_addr >> 10) & 0x3fffff); - reloc_addr[2] = 0x81c06000 | (symbol_addr & 0x3ff); + reloc_addr[1] = OPCODE_SETHI_G1 | (( symbol_addr >> 10 ) & 0x3fffff); + reloc_addr[2] = OPCODE_JMP_G1 | ( symbol_addr & 0x3ff ); break; case R_SPARC_RELATIVE: @@ -285,16 +268,39 @@ reloc_addr[0] = OPCODE_SETHI_G1 | (value >> 10); if (_dl_debug_move) _dl_dprintf(_dl_debug_file, "\t%s move %d bytes from %x to %x\n", - symname, sym->st_size, + symname, sym_ref.sym->st_size, symbol_addr, reloc_addr); #endif _dl_memcpy((char *)reloc_addr, (char *)symbol_addr, - sym->st_size); - } else + sym_ref.sym->st_size); + } +#if defined (__SUPPORT_LD_DEBUG__) + else _dl_dprintf(_dl_debug_file, "no symbol_addr to copy !?\n"); +#endif + break; +#if defined USE_TLS && USE_TLS + case R_SPARC_TLS_DTPMOD32: + *reloc_addr = tls_tpnt->l_tls_modid; break; + + case R_SPARC_TLS_DTPOFF32: + /* During relocation all TLS symbols are defined and used. + * Therefore the offset is already correct. */ + *reloc_addr = sym_ref.sym->st_value + rpnt->r_addend; + break; + + case R_SPARC_TLS_TPOFF32: + /* The offset is negative, forward from the thread pointer. + * We know the offset of the object the symbol is contained in. + * It is a negative value which will be added to the + * thread pointer. */ + CHECK_STATIC_TLS ((struct link_map *) tls_tpnt); + *reloc_addr = sym_ref.sym->st_value - tls_tpnt->l_tls_offset + rpnt->r_addend; + break; +#endif default: return -1; /* Calls _dl_exit(1). 
*/ } @@ -311,7 +317,7 @@ reloc_addr[0] = OPCODE_SETHI_G1 | (value >> 10); #undef __SPARC_LAZY_RELOC_WORKS #ifdef __SPARC_LAZY_RELOC_WORKS static int -_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; @@ -359,14 +365,16 @@ _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, #ifdef __SPARC_LAZY_RELOC_WORKS (void)_dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc); #else - _dl_parse_relocation_information(rpnt, rel_addr, rel_size); + _dl_parse_relocation_information(rpnt, &_dl_loaded_modules->symbol_scope, + rel_addr, rel_size); #endif } int _dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/x86_64/dl-debug.h b/ldso/ldso/x86_64/dl-debug.h index d605a0385..c47062b55 100644 --- a/ldso/ldso/x86_64/dl-debug.h +++ b/ldso/ldso/x86_64/dl-debug.h @@ -29,7 +29,7 @@ * SUCH DAMAGE. */ -static const char *_dl_reltypes_tab[] = { +static const char * const _dl_reltypes_tab[] = { [ 0] "R_X86_64_NONE", "R_X86_64_64", "R_X86_64_PC32", "R_X86_64_GOT32", [ 4] "R_X86_64_PLT32", "R_X86_64_COPY", "R_X86_64_GLOB_DAT", "R_X86_64_JUMP_SLOT", [ 8] "R_X86_64_RELATIVE", "R_X86_64_GOTPCREL", "R_X86_64_32", "R_X86_64_32S", diff --git a/ldso/ldso/x86_64/dl-startup.h b/ldso/ldso/x86_64/dl-startup.h index 6da888068..2f5e6ece7 100644 --- a/ldso/ldso/x86_64/dl-startup.h +++ b/ldso/ldso/x86_64/dl-startup.h @@ -10,6 +10,7 @@ __asm__ ( " .text\n" " .global _start\n" " .type _start,%function\n" + " .hidden _start\n" "_start:\n" " movq %rsp, %rdi\n" " call _dl_start\n" @@ -35,7 +36,7 @@ __asm__ ( ); /* Get a pointer to the argv array. On many platforms this can be just - * the address if the first argument, on other platforms we need to + * the address of the first argument, on other platforms we need to * do something a little more subtle here. */ #define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*) ARGS)+1) @@ -58,6 +59,9 @@ void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, ElfW(Addr) *reloc_addr, case R_X86_64_TPOFF64: *reloc_addr = sym->st_value + rpnt->r_addend - symbol_addr; break; +/*TODO: case R_X86_64_RELATIVE: + *reloc_addr = load_addr + rpnt->r_addend; + break; */ default: _dl_exit(1); } diff --git a/ldso/ldso/x86_64/dl-syscalls.h b/ldso/ldso/x86_64/dl-syscalls.h index 996bb87c6..f40c4fd31 100644 --- a/ldso/ldso/x86_64/dl-syscalls.h +++ b/ldso/ldso/x86_64/dl-syscalls.h @@ -1,6 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/x86_64/dl-sysdep.h b/ldso/ldso/x86_64/dl-sysdep.h index 202eab19d..3706c0d00 100644 --- a/ldso/ldso/x86_64/dl-sysdep.h +++ b/ldso/ldso/x86_64/dl-sysdep.h @@ -16,9 +16,8 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA. */ + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. 
*/ /* Define this if the system uses RELOCA. */ #define ELF_USES_RELOCA @@ -41,18 +40,17 @@ do { \ struct elf_resolve; extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry); -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 - /* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or TLS variable, so undefined references should not be allowed to define the value. ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one of the main executable's symbols, as for a COPY reloc. */ -#define elf_machine_type_class(type) \ - ((((type) == R_X86_64_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ +#define elf_machine_type_class(type) \ + ((((type) == R_X86_64_JUMP_SLOT \ + || (type) == R_X86_64_DTPMOD64 \ + || (type) == R_X86_64_DTPOFF64 \ + || (type) == R_X86_64_TPOFF64) \ + * ELF_RTYPE_CLASS_PLT) \ | (((type) == R_X86_64_COPY) * ELF_RTYPE_CLASS_COPY)) /* Return the link-time address of _DYNAMIC. Conveniently, this is the diff --git a/ldso/ldso/x86_64/elfinterp.c b/ldso/ldso/x86_64/elfinterp.c index e78a80969..75666a799 100644 --- a/ldso/ldso/x86_64/elfinterp.c +++ b/ldso/ldso/x86_64/elfinterp.c @@ -47,7 +47,6 @@ extern int _dl_linux_resolve(void); unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; ElfW(Sym) *symtab; @@ -60,25 +59,18 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *)(rel_addr + reloc_entry); - reloc_type = ELF_R_TYPE(this_reloc->r_info); symtab_index = ELF_R_SYM(this_reloc->r_info); symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *)tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; - if (unlikely(reloc_type != R_X86_64_JUMP_SLOT)) { - _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", - _dl_progname); - _dl_exit(1); - } - /* Address of the jump instruction to fix up. */ instr_addr = (this_reloc->r_offset + tpnt->loadaddr); got_addr = (char **)instr_addr; /* Get the address of the GOT entry. 
*/ - new_addr = _dl_find_hash(symname, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT); + new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); if (unlikely(!new_addr)) { _dl_dprintf(2, "%s: Can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); @@ -102,9 +94,9 @@ _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry) } static int -_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc)(struct elf_resolve *tpnt, struct dyn_elf *scope, + int (*reloc_fnc)(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; @@ -159,13 +151,16 @@ _dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_do_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; char *symname; - ElfW(Sym) *sym; +#if defined USE_TLS && USE_TLS + struct elf_resolve *tls_tpnt; +#endif + struct symbol_ref sym_ref; ElfW(Addr) *reloc_addr; ElfW(Addr) symbol_addr; #if defined (__SUPPORT_LD_DEBUG__) @@ -175,22 +170,39 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + (unsigned long)rpnt->r_offset); reloc_type = ELF_R_TYPE(rpnt->r_info); symtab_index = ELF_R_SYM(rpnt->r_info); - sym = &symtab[symtab_index]; + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symbol_addr = 0; - symname = strtab + sym->st_name; + symname = strtab + sym_ref.sym->st_name; if (symtab_index) { symbol_addr = (ElfW(Addr))_dl_find_hash(symname, scope, tpnt, - elf_machine_type_class(reloc_type)); + elf_machine_type_class(reloc_type), &sym_ref); /* * We want to allow undefined references to weak symbols - this * might have been intentional. We should not be linking local * symbols here, so all bases should be covered. */ - if (unlikely(!symbol_addr && ELF_ST_BIND(sym->st_info) != STB_WEAK)) { - _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); - _dl_exit(1); + if (unlikely(!symbol_addr && (ELF_ST_TYPE(sym_ref.sym->st_info) != STT_TLS) + && (ELF_ST_BIND(sym_ref.sym->st_info) != STB_WEAK))) { + /* This may be non-fatal if called from dlopen. */ + return 1; } + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } +#if defined USE_TLS && USE_TLS + tls_tpnt = sym_ref.tpnt; +#endif + } else { + /* Relocs against STN_UNDEF are usually treated as using a + * symbol value of zero, and using the module containing the + * reloc itself. */ + symbol_addr = sym_ref.sym->st_value; +#if defined USE_TLS && USE_TLS + tls_tpnt = tpnt; +#endif } #if defined (__SUPPORT_LD_DEBUG__) @@ -219,15 +231,24 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, *reloc_addr = map->l_addr + rpnt->r_addend; break; */ +#if defined USE_TLS && USE_TLS case R_X86_64_DTPMOD64: - *reloc_addr = 1; + *reloc_addr = tls_tpnt->l_tls_modid; break; case R_X86_64_DTPOFF64: - *reloc_addr = sym->st_value + rpnt->r_addend; + /* During relocation all TLS symbols are defined and used. + * Therefore the offset is already correct. 
*/ + *reloc_addr = symbol_addr + rpnt->r_addend; break; case R_X86_64_TPOFF64: - *reloc_addr = sym->st_value + rpnt->r_addend - symbol_addr; + /* The offset is negative, forward from the thread pointer. + * We know the offset of the object the symbol is contained in. + * It is a negative value which will be added to the + * thread pointer. */ + CHECK_STATIC_TLS ((struct link_map *) tls_tpnt); + *reloc_addr = symbol_addr - tls_tpnt->l_tls_offset + rpnt->r_addend; break; +#endif case R_X86_64_32: *(unsigned int *) reloc_addr = symbol_addr + rpnt->r_addend; /* XXX: should check for overflow eh ? */ @@ -239,15 +260,18 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, if (_dl_debug_move) _dl_dprintf(_dl_debug_file, "\t%s move %d bytes from %x to %x\n", - symname, sym->st_size, + symname, sym_ref.sym->st_size, symbol_addr, reloc_addr); #endif _dl_memcpy((char *)reloc_addr, (char *)symbol_addr, - sym->st_size); - } else + sym_ref.sym->st_size); + } +#if defined (__SUPPORT_LD_DEBUG__) + else _dl_dprintf(_dl_debug_file, "no symbol_addr to copy !?\n"); +#endif break; default: @@ -264,18 +288,16 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, } static int -_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_do_lazy_reloc(struct elf_resolve *tpnt, struct r_scope_elem *scope, ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; - int symtab_index; ElfW(Addr) *reloc_addr; #if defined (__SUPPORT_LD_DEBUG__) ElfW(Addr) old_val; #endif (void)scope; - symtab_index = ELF_R_SYM(rpnt->r_info); (void)strtab; reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + rpnt->r_offset); @@ -314,8 +336,9 @@ _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt, int _dl_parse_relocation_information(struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc); + return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/xtensa/dl-debug.h b/ldso/ldso/xtensa/dl-debug.h index 327defc07..18beae5ca 100644 --- a/ldso/ldso/xtensa/dl-debug.h +++ b/ldso/ldso/xtensa/dl-debug.h @@ -6,56 +6,33 @@ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. 
*/ -static const char *_dl_reltypes_tab[] = +static const char * const _dl_reltypes_tab[] = { - "R_XTENSA_NONE", - "R_XTENSA_32", - "R_XTENSA_RTLD", - "R_XTENSA_GLOB_DAT", - "R_XTENSA_JMP_SLOT", - "R_XTENSA_RELATIVE", - "R_XTENSA_PLT", - "R_XTENSA_UNUSED7", - "R_XTENSA_OP0", - "R_XTENSA_OP1", - "R_XTENSA_OP2", - "R_XTENSA_ASM_EXPAND", - "R_XTENSA_ASM_SIMPLIFY", - "R_XTENSA_UNUSED13", - "R_XTENSA_UNUSED14", - "R_XTENSA_GNU_VTINHERIT", - "R_XTENSA_GNU_VTENTRY", - "R_XTENSA_DIFF8", - "R_XTENSA_DIFF16", - "R_XTENSA_DIFF32", - "R_XTENSA_SLOT0_OP", - "R_XTENSA_SLOT1_OP", - "R_XTENSA_SLOT2_OP", - "R_XTENSA_SLOT3_OP", - "R_XTENSA_SLOT4_OP", - "R_XTENSA_SLOT5_OP", - "R_XTENSA_SLOT6_OP", - "R_XTENSA_SLOT7_OP", - "R_XTENSA_SLOT8_OP", - "R_XTENSA_SLOT9_OP", - "R_XTENSA_SLOT10_OP", - "R_XTENSA_SLOT11_OP", - "R_XTENSA_SLOT12_OP", - "R_XTENSA_SLOT13_OP", - "R_XTENSA_SLOT14_OP", - "R_XTENSA_SLOT0_ALT", - "R_XTENSA_SLOT1_ALT", - "R_XTENSA_SLOT2_ALT", - "R_XTENSA_SLOT3_ALT", - "R_XTENSA_SLOT4_ALT", - "R_XTENSA_SLOT5_ALT", - "R_XTENSA_SLOT6_ALT", - "R_XTENSA_SLOT7_ALT", - "R_XTENSA_SLOT8_ALT", - "R_XTENSA_SLOT9_ALT", - "R_XTENSA_SLOT10_ALT", - "R_XTENSA_SLOT11_ALT", - "R_XTENSA_SLOT12_ALT", - "R_XTENSA_SLOT13_ALT", - "R_XTENSA_SLOT14_ALT" + [0] "R_XTENSA_NONE", "R_XTENSA_32", + [2] "R_XTENSA_RTLD", "R_XTENSA_GLOB_DAT", + [4] "R_XTENSA_JMP_SLOT", "R_XTENSA_RELATIVE", + [6] "R_XTENSA_PLT", "R_XTENSA_UNUSED7", + [8] "R_XTENSA_OP0", "R_XTENSA_OP1", + [10] "R_XTENSA_OP2", "R_XTENSA_ASM_EXPAND", + [12] "R_XTENSA_ASM_SIMPLIFY", "R_XTENSA_UNUSED13", + [14] "R_XTENSA_UNUSED14", "R_XTENSA_GNU_VTINHERIT", + [16] "R_XTENSA_GNU_VTENTRY", "R_XTENSA_DIFF8", + [18] "R_XTENSA_DIFF16", "R_XTENSA_DIFF32", + [20] "R_XTENSA_SLOT0_OP", "R_XTENSA_SLOT1_OP", + [22] "R_XTENSA_SLOT2_OP", "R_XTENSA_SLOT3_OP", + [24] "R_XTENSA_SLOT4_OP", "R_XTENSA_SLOT5_OP", + [26] "R_XTENSA_SLOT6_OP", "R_XTENSA_SLOT7_OP", + [28] "R_XTENSA_SLOT8_OP", "R_XTENSA_SLOT9_OP", + [30] "R_XTENSA_SLOT10_OP", "R_XTENSA_SLOT11_OP", + [32] "R_XTENSA_SLOT12_OP", "R_XTENSA_SLOT13_OP", + [34] "R_XTENSA_SLOT14_OP", "R_XTENSA_SLOT0_ALT", + [36] "R_XTENSA_SLOT1_ALT", "R_XTENSA_SLOT2_ALT", + [38] "R_XTENSA_SLOT3_ALT", "R_XTENSA_SLOT4_ALT", + [40] "R_XTENSA_SLOT5_ALT", "R_XTENSA_SLOT6_ALT", + [42] "R_XTENSA_SLOT7_ALT", "R_XTENSA_SLOT8_ALT", + [44] "R_XTENSA_SLOT9_ALT", "R_XTENSA_SLOT10_ALT", + [46] "R_XTENSA_SLOT11_ALT", "R_XTENSA_SLOT12_ALT", + [48] "R_XTENSA_SLOT13_ALT", "R_XTENSA_SLOT14_ALT", + [50] "R_XTENSA_TLSDESC_FN", "R_XTENSA_TLSDESC_ARG", + [52] "R_XTENSA_TLS_TPOFF" }; diff --git a/ldso/ldso/xtensa/dl-startup.h b/ldso/ldso/xtensa/dl-startup.h index 8ae962408..aece0cd96 100644 --- a/ldso/ldso/xtensa/dl-startup.h +++ b/ldso/ldso/xtensa/dl-startup.h @@ -11,8 +11,10 @@ __asm__ ( " .text\n" " .align 4\n" + " .literal_position\n" " .global _start\n" " .type _start, @function\n" + " .hidden _start\n" "_start:\n" " # Compute load offset in a2: the GOT has not yet been relocated\n" " # but the entries for local symbols contain the relative offsets\n" @@ -21,6 +23,7 @@ __asm__ ( " .align 4\n" "0: movi a3, _start+3\n" " sub a2, a0, a3\n" +#if defined(__XTENSA_WINDOWED_ABI__) " # Make sure a0 is cleared to mark the top of stack.\n" " movi a0, 0\n" " # user_entry_point = _dl_start(pointer to argument block)\n" @@ -30,6 +33,17 @@ __asm__ ( " callx4 a4\n" " # Save user_entry_point so we can jump to it.\n" " mov a3, a6\n" +#elif defined(__XTENSA_CALL0_ABI__) + " # user_entry_point = _dl_start(pointer to argument block)\n" + " movi a0, _dl_start\n" + " add a0, a0, a2\n" + " mov 
a2, sp\n" + " callx0 a0\n" + " # Save user_entry_point so we can jump to it.\n" + " mov a3, a2\n" +#else +#error Unsupported Xtensa ABI +#endif " l32i a7, sp, 0 # load argc\n" " # Load _dl_skip_args into a4.\n" " movi a4, _dl_skip_args\n" @@ -81,6 +95,7 @@ do { \ unsigned long l_addr = tpnt->loadaddr; \ Elf32_Word relative_count; \ unsigned long rel_addr; \ + Elf32_Addr prev_got_start = 0, prev_got_end = 0; \ int x; \ \ got_loc = (xtensa_got_location *) \ @@ -91,7 +106,24 @@ do { \ got_start = got_loc[x].offset & ~(PAGE_SIZE - 1); \ got_end = ((got_loc[x].offset + got_loc[x].length + PAGE_SIZE - 1) \ & ~(PAGE_SIZE - 1)); \ - _dl_mprotect ((void *)(got_start + l_addr), got_end - got_start, \ + if (got_end >= prev_got_start && got_start <= prev_got_end) { \ + if (got_end > prev_got_end) \ + prev_got_end = got_end; \ + if (got_start < prev_got_start) \ + prev_got_start = got_start; \ + continue; \ + } else if (prev_got_start != prev_got_end) { \ + _dl_mprotect ((void *)(prev_got_start + l_addr), \ + prev_got_end - prev_got_start, \ + PROT_READ | PROT_WRITE | PROT_EXEC); \ + } \ + prev_got_start = got_start; \ + prev_got_end = got_end; \ + } \ +\ + if (prev_got_start != prev_got_end) { \ + _dl_mprotect ((void *)(prev_got_start + l_addr), \ + prev_got_end - prev_got_start, \ PROT_READ | PROT_WRITE | PROT_EXEC); \ } \ \ diff --git a/ldso/ldso/xtensa/dl-syscalls.h b/ldso/ldso/xtensa/dl-syscalls.h index 4b42a57e0..f40c4fd31 100644 --- a/ldso/ldso/xtensa/dl-syscalls.h +++ b/ldso/ldso/xtensa/dl-syscalls.h @@ -1,7 +1 @@ -/* We can't use the real errno in ldso, since it has not yet - * been dynamicly linked in yet. */ -#include "sys/syscall.h" -extern int _dl_errno; -#undef __set_errno -#define __set_errno(X) {(_dl_errno) = (X);} - +/* stub for arch-specific syscall issues */ diff --git a/ldso/ldso/xtensa/dl-sysdep.h b/ldso/ldso/xtensa/dl-sysdep.h index b58feff54..d308237d3 100644 --- a/ldso/ldso/xtensa/dl-sysdep.h +++ b/ldso/ldso/xtensa/dl-sysdep.h @@ -14,9 +14,8 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 51 Franklin Street - Fifth Floor, - Boston, MA 02110-1301, USA. */ + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ /* Define this if the system uses RELOCA. 
*/ #define ELF_USES_RELOCA @@ -37,6 +36,7 @@ typedef struct xtensa_got_location_struct { do { \ xtensa_got_location *got_loc; \ Elf32_Addr l_addr = MODULE->loadaddr; \ + Elf32_Addr prev_got_start = 0, prev_got_end = 0; \ int x; \ \ got_loc = (xtensa_got_location *) \ @@ -48,7 +48,28 @@ typedef struct xtensa_got_location_struct { got_start = got_loc[x].offset & ~(PAGE_SIZE - 1); \ got_end = ((got_loc[x].offset + got_loc[x].length + PAGE_SIZE - 1) \ & ~(PAGE_SIZE - 1)); \ - _dl_mprotect ((void *)(got_start + l_addr) , got_end - got_start, \ + if (got_end >= prev_got_start && got_start <= prev_got_end) \ + { \ + if (got_end > prev_got_end) \ + prev_got_end = got_end; \ + if (got_start < prev_got_start) \ + prev_got_start = got_start; \ + continue; \ + } \ + else if (prev_got_start != prev_got_end) \ + { \ + _dl_mprotect ((void *)(prev_got_start + l_addr), \ + prev_got_end - prev_got_start, \ + PROT_READ | PROT_WRITE | PROT_EXEC); \ + } \ + prev_got_start = got_start; \ + prev_got_end = got_end; \ + } \ + \ + if (prev_got_start != prev_got_end) \ + { \ + _dl_mprotect ((void *)(prev_got_start + l_addr), \ + prev_got_end - prev_got_start, \ PROT_READ | PROT_WRITE | PROT_EXEC); \ } \ \ @@ -73,21 +94,22 @@ typedef struct xtensa_got_location_struct { /* Used for error messages. */ #define ELF_TARGET "Xtensa" +/* Need bootstrap relocations */ +#define ARCH_NEEDS_BOOTSTRAP_RELOCS + struct elf_resolve; extern unsigned long _dl_linux_resolver (struct elf_resolve *, int); -/* 4096 bytes alignment */ -#define PAGE_ALIGN 0xfffff000 -#define ADDR_ALIGN 0xfff -#define OFFS_ALIGN 0x7ffff000 - -/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so - undefined references should not be allowed to define the value. */ +/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or + TLS variable, so undefined references should not be allowed to define + the value. */ #define elf_machine_type_class(type) \ - (((type) == R_XTENSA_JMP_SLOT) * ELF_RTYPE_CLASS_PLT) + (((type) == R_XTENSA_JMP_SLOT || (type) == R_XTENSA_TLS_TPOFF \ + || (type) == R_XTENSA_TLSDESC_FN || (type) == R_XTENSA_TLSDESC_ARG) \ + * ELF_RTYPE_CLASS_PLT) /* Return the link-time address of _DYNAMIC. */ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_dynamic (void) { /* This function is only used while bootstrapping the runtime linker. @@ -97,7 +119,7 @@ elf_machine_dynamic (void) } /* Return the run-time load address of the shared object. */ -static __inline__ Elf32_Addr +static __always_inline Elf32_Addr elf_machine_load_address (void) { Elf32_Addr addr, tmp; @@ -118,7 +140,7 @@ elf_machine_load_address (void) return addr - 3; } -static __inline__ void +static __always_inline void elf_machine_relative (Elf32_Addr load_off, const Elf32_Addr rel_addr, Elf32_Word relative_count) { diff --git a/ldso/ldso/xtensa/dl-tlsdesc.S b/ldso/ldso/xtensa/dl-tlsdesc.S new file mode 100644 index 000000000..6f417f61a --- /dev/null +++ b/ldso/ldso/xtensa/dl-tlsdesc.S @@ -0,0 +1,103 @@ +/* Thread-local storage handling in the ELF dynamic linker. Xtensa version. + Copyright (C) 2012-2013 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + +#if defined __UCLIBC_HAS_TLS__ +#include <tls.h> +#include "tlsdesc.h" + + .text +HIDDEN_ENTRY (_dl_tlsdesc_return) + rur.threadptr a3 + add a2, a2, a3 + abi_ret +END (_dl_tlsdesc_return) + +#ifdef SHARED + + + /* This function is used for symbols that need dynamic TLS. + + The argument passed to this function points to the TLS descriptor. + + The assembly code that follows is a rendition of the following + C code, hand-optimized a little bit. + + ptrdiff_t + _dl_tlsdesc_dynamic(struct tlsdesc_dynamic_arg *td) + { + dtv_t *dtv = (dtv_t *)THREAD_DTV(); + if (td->gen_count <= dtv[0].counter + && dtv[td->tlsinfo.ti_module].pointer.val + != TLS_DTV_UNALLOCATED) + return dtv[td->tlsinfo.ti_module].pointer.val + + td->tlsinfo.ti_offset - __builtin_thread_pointer(); + return __tls_get_addr (&td->tlsinfo) - __builtin_thread_pointer(); + } + */ + +HIDDEN_ENTRY (_dl_tlsdesc_dynamic) + + /* dtv_t *dtv = (dtv_t *)THREAD_DTV(); */ + rur.threadptr a3 + l32i a4, a3, 0 + + /* if (td->gen_count <= dtv[0].counter */ + l32i a6, a2, TLSDESC_GEN_COUNT + l32i a7, a4, 0 + blt a7, a6, .Lslow + + /* && dtv[td->tlsinfo.ti_module].pointer.val != TLS_DTV_UNALLOCATED) */ + l32i a6, a2, TLSDESC_MODID + addx8 a6, a3, a6 + l32i a6, a6, 0 + beqi a6, -1, .Lslow + + /* return dtv[td->tlsinfo.ti_module].pointer.val + + td->tlsinfo.ti_offset - __builtin_thread_pointer(); */ + l32i a6, a2, TLSDESC_MODOFF + sub a2, a6, a3 + abi_ret + + /* return __tls_get_addr (&td->tlsinfo) - __builtin_thread_pointer(); */ +.Lslow: +#if defined(__XTENSA_WINDOWED_ABI__) + mov a6, a2 + movi a4, __tls_get_addr + callx4 a4 + sub a2, a6, a3 + retw +#elif defined(__XTENSA_CALL0_ABI__) + addi a1, a1, -16 + s32i a0, a1, 0 + s32i a3, a1, 4 + movi a0, __tls_get_addr + callx0 a0 + l32i a3, a1, 4 + l32i a0, a1, 0 + sub a2, a2, a3 + addi a1, a1, 16 + ret +#else +#error Unsupported Xtensa ABI +#endif +END (_dl_tlsdesc_dynamic) + +#endif /* SHARED */ +#endif diff --git a/ldso/ldso/xtensa/elfinterp.c b/ldso/ldso/xtensa/elfinterp.c index a459431b1..66deb63ab 100644 --- a/ldso/ldso/xtensa/elfinterp.c +++ b/ldso/ldso/xtensa/elfinterp.c @@ -32,13 +32,17 @@ #include "ldso.h" +#if defined(USE_TLS) && USE_TLS +#include "dl-tls.h" +#include "tlsdeschtab.h" +#endif + unsigned long _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) { - int reloc_type; ELF_RELOC *this_reloc; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; int symtab_index; char *rel_addr; char *new_addr; @@ -47,25 +51,18 @@ _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) rel_addr = (char *) tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *) (rel_addr + reloc_entry); - reloc_type = ELF32_R_TYPE (this_reloc->r_info); - symtab_index = ELF32_R_SYM (this_reloc->r_info); + symtab_index = ELF_R_SYM (this_reloc->r_info); - symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; symname = strtab + symtab[symtab_index].st_name; - if (unlikely (reloc_type != R_XTENSA_JMP_SLOT)) { - _dl_dprintf (2, "%s: Incorrect relocation type in jump 
relocations\n", - _dl_progname); - _dl_exit (1); - } - /* Address of the literal to fix up. */ got_addr = (char **) (this_reloc->r_offset + tpnt->loadaddr); /* Get the address of the GOT entry. */ - new_addr = _dl_find_hash (symname, tpnt->symbol_scope, tpnt, - ELF_RTYPE_CLASS_PLT); + new_addr = _dl_find_hash (symname, &_dl_loaded_modules->symbol_scope, tpnt, + ELF_RTYPE_CLASS_PLT, NULL); if (unlikely (!new_addr)) { _dl_dprintf (2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname); @@ -90,14 +87,14 @@ _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) static int -_dl_parse (struct elf_resolve *tpnt, struct dyn_elf *scope, +_dl_parse (struct elf_resolve *tpnt, struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size, - int (*reloc_fnc) (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab)) + int (*reloc_fnc) (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)) { unsigned int i; char *strtab; - Elf32_Sym *symtab; + ElfW(Sym) *symtab; ELF_RELOC *rpnt; int symtab_index; @@ -105,13 +102,13 @@ _dl_parse (struct elf_resolve *tpnt, struct dyn_elf *scope, rpnt = (ELF_RELOC *) rel_addr; rel_size /= sizeof (ELF_RELOC); - symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB]; + symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; for (i = 0; i < rel_size; i++, rpnt++) { int res; - symtab_index = ELF32_R_SYM (rpnt->r_info); + symtab_index = ELF_R_SYM (rpnt->r_info); debug_sym (symtab, strtab, symtab_index); debug_reloc (symtab, strtab, rpnt); @@ -128,7 +125,7 @@ _dl_parse (struct elf_resolve *tpnt, struct dyn_elf *scope, strtab + symtab[symtab_index].st_name); if (unlikely (res < 0)) { - int reloc_type = ELF32_R_TYPE (rpnt->r_info); + int reloc_type = ELF_R_TYPE (rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) _dl_dprintf (2, "can't handle reloc type %s\n", _dl_reltypes (reloc_type)); @@ -148,30 +145,34 @@ _dl_parse (struct elf_resolve *tpnt, struct dyn_elf *scope, static int -_dl_do_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; int symtab_index; char *symname; - Elf32_Sym *sym; - Elf32_Addr *reloc_addr; - Elf32_Addr symbol_addr; +#if defined USE_TLS && USE_TLS + struct elf_resolve *tls_tpnt = NULL; +#endif + struct symbol_ref sym_ref; + ElfW(Addr) *reloc_addr; + ElfW(Addr) symbol_addr; #if defined (__SUPPORT_LD_DEBUG__) - Elf32_Addr old_val; + ElfW(Addr) old_val; #endif - reloc_addr = (Elf32_Addr *) (tpnt->loadaddr + rpnt->r_offset); - reloc_type = ELF32_R_TYPE (rpnt->r_info); - symtab_index = ELF32_R_SYM (rpnt->r_info); - sym = &symtab[symtab_index]; + reloc_addr = (ElfW(Addr) *) (tpnt->loadaddr + rpnt->r_offset); + reloc_type = ELF_R_TYPE (rpnt->r_info); + symtab_index = ELF_R_SYM (rpnt->r_info); + sym_ref.sym = &symtab[symtab_index]; + sym_ref.tpnt = NULL; symbol_addr = 0; - symname = strtab + sym->st_name; + symname = strtab + sym_ref.sym->st_name; if (symtab_index) { - symbol_addr = (Elf32_Addr) + symbol_addr = (ElfW(Addr)) _dl_find_hash (symname, scope, tpnt, - elf_machine_type_class (reloc_type)); + elf_machine_type_class (reloc_type), &sym_ref); /* * We want to allow undefined references to weak symbols - this might @@ -179,11 +180,22 @@ _dl_do_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, * here, 
so all bases should be covered. */ if (unlikely (!symbol_addr && - ELF32_ST_BIND (sym->st_info) != STB_WEAK)) { - _dl_dprintf (2, "%s: can't resolve symbol '%s'\n", - _dl_progname, symname); - _dl_exit (1); + ELF_ST_TYPE (sym_ref.sym->st_info) != STT_TLS && + ELF_ST_BIND (sym_ref.sym->st_info) != STB_WEAK)) { + return 1; } + if (_dl_trace_prelink) { + _dl_debug_lookup (symname, tpnt, &symtab[symtab_index], + &sym_ref, elf_machine_type_class(reloc_type)); + } +#if defined USE_TLS && USE_TLS + tls_tpnt = sym_ref.tpnt; +#endif + } else { + symbol_addr =symtab[symtab_index].st_value; +#if defined USE_TLS && USE_TLS + tls_tpnt = tpnt; +#endif } #if defined (__SUPPORT_LD_DEBUG__) @@ -201,12 +213,12 @@ _dl_do_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, case R_XTENSA_RTLD: if (rpnt->r_addend == 1) { - /* Grab the function pointer stashed at the beginning of the - GOT by the GOT_INIT function. */ - *reloc_addr = *(Elf32_Addr *) tpnt->dynamic_info[DT_PLTGOT]; + /* Grab the function pointer stashed at the beginning + of the GOT by the GOT_INIT function. */ + *reloc_addr = *(ElfW(Addr) *) tpnt->dynamic_info[DT_PLTGOT]; } else if (rpnt->r_addend == 2) { /* Store the link map for the object. */ - *reloc_addr = (Elf32_Addr) tpnt; + *reloc_addr = (ElfW(Addr)) tpnt; } else { _dl_exit (1); } @@ -216,12 +228,41 @@ _dl_do_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, *reloc_addr += tpnt->loadaddr + rpnt->r_addend; break; +#if defined USE_TLS && USE_TLS + case R_XTENSA_TLS_TPOFF: + CHECK_STATIC_TLS((struct link_map *) tls_tpnt); + *reloc_addr = symbol_addr + tls_tpnt->l_tls_offset + rpnt->r_addend; + break; + case R_XTENSA_TLSDESC_FN: +#ifndef SHARED + CHECK_STATIC_TLS((struct link_map *) tls_tpnt); +#else + if (!TRY_STATIC_TLS ((struct link_map *) tls_tpnt)) + *reloc_addr = (ElfW(Addr)) _dl_tlsdesc_dynamic; + else +#endif + *reloc_addr = (ElfW(Addr)) _dl_tlsdesc_return; + break; + case R_XTENSA_TLSDESC_ARG: +#ifndef SHARED + CHECK_STATIC_TLS((struct link_map *) tls_tpnt); +#else + if (!TRY_STATIC_TLS ((struct link_map *) tls_tpnt)) + *reloc_addr = (ElfW(Addr)) + _dl_make_tlsdesc_dynamic((struct link_map *) tls_tpnt, + symbol_addr + *reloc_addr); + else +#endif + *reloc_addr += symbol_addr + tls_tpnt->l_tls_offset; + break; +#endif + default: return -1; /* Calls _dl_exit(1). 
*/ } #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf (_dl_debug_file, "\tpatched: %x ==> %x @ %x", + _dl_dprintf (_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif @@ -230,17 +271,17 @@ _dl_do_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, static int -_dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, - ELF_RELOC *rpnt, Elf32_Sym *symtab, char *strtab) +_dl_do_lazy_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope, + ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab) { int reloc_type; - Elf32_Addr *reloc_addr; + ElfW(Addr) *reloc_addr; #if defined (__SUPPORT_LD_DEBUG__) - Elf32_Addr old_val; + ElfW(Addr) old_val; #endif - reloc_addr = (Elf32_Addr *) (tpnt->loadaddr + rpnt->r_offset); - reloc_type = ELF32_R_TYPE (rpnt->r_info); + reloc_addr = (ElfW(Addr) *) (tpnt->loadaddr + rpnt->r_offset); + reloc_type = ELF_R_TYPE (rpnt->r_info); #if defined (__SUPPORT_LD_DEBUG__) old_val = *reloc_addr; @@ -260,7 +301,7 @@ _dl_do_lazy_reloc (struct elf_resolve *tpnt, struct dyn_elf *scope, #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_reloc && _dl_debug_detail) - _dl_dprintf (_dl_debug_file, "\tpatched: %x ==> %x @ %x", + _dl_dprintf (_dl_debug_file, "\tpatched: %x ==> %x @ %x\n", old_val, *reloc_addr, reloc_addr); #endif return 0; @@ -277,9 +318,10 @@ _dl_parse_lazy_relocation_information (struct dyn_elf *rpnt, int _dl_parse_relocation_information (struct dyn_elf *rpnt, + struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size) { - return _dl_parse (rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, + return _dl_parse (rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc); } diff --git a/ldso/ldso/xtensa/resolve.S b/ldso/ldso/xtensa/resolve.S index 902cd8238..12a554de7 100644 --- a/ldso/ldso/xtensa/resolve.S +++ b/ldso/ldso/xtensa/resolve.S @@ -14,9 +14,8 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 51 Franklin Street - Fifth Floor, - Boston, MA 02110-1301, USA. */ + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ #define MIN_FRAME_SIZE 32 @@ -28,16 +27,11 @@ .text .align 4 + .literal_position .global _dl_linux_resolve .type _dl_linux_resolve, @function _dl_linux_resolve: - /* Fix up the high 2 bits of the return address. */ - movi a13, 0f - slli a12, a0, 2 -0: extui a13, a13, 30, 2 - ssai 2 - src a12, a13, a12 - +#if defined(__XTENSA_WINDOWED_ABI__) /* Call the fixup function. */ movi a8, _dl_linux_resolver callx8 a8 @@ -54,4 +48,38 @@ _dl_linux_resolve: /* Jump to the next instruction past the ENTRY. */ addi a10, a10, 3 jx a10 +#elif defined(__XTENSA_CALL0_ABI__) + /* Reserve stack space and save incoming arguments. */ + addi a1, a1, -32 + s32i a0, a1, 0 + s32i a2, a1, 8 + s32i a3, a1, 12 + s32i a4, a1, 16 + s32i a5, a1, 20 + s32i a6, a1, 24 + s32i a7, a1, 28 + + /* Move arguments for the _dl_linux_resolver to proper registers. */ + mov a2, a10 + mov a3, a11 + /* Call the fixup function. */ + movi a0, _dl_linux_resolver + callx0 a0 + mov a10, a2 + + /* Restore incoming arguments from stack and deallocate reservation. */ + l32i a0, a1, 0 + l32i a2, a1, 8 + l32i a3, a1, 12 + l32i a4, a1, 16 + l32i a5, a1, 20 + l32i a6, a1, 24 + l32i a7, a1, 28 + addi a1, a1, 32 + + /* Jump to the target function. 
*/ + jx a10 +#else +#error Unsupported Xtensa ABI +#endif .size _dl_linux_resolve, . - _dl_linux_resolve diff --git a/ldso/libdl/Makefile.in b/ldso/libdl/Makefile.in index 41cec858f..fe1eb9dab 100644 --- a/ldso/libdl/Makefile.in +++ b/ldso/libdl/Makefile.in @@ -1,23 +1,28 @@ # Makefile.in for uClibc # -# Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org> +# Copyright (C) 2000-2008 Erik Andersen <andersen@uclibc.org> # # Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. # +subdirs += ldso/libdl + CFLAGS-libdl := -DNOT_IN_libc -DIS_IN_libdl $(SSP_ALL_CFLAGS) CFLAGS-libdl += -I$(top_srcdir)ldso/ldso/$(TARGET_ARCH) -I$(top_srcdir)ldso/include -I$(top_srcdir)ldso/ldso CFLAGS-libdl += -DUCLIBC_RUNTIME_PREFIX=\"$(RUNTIME_PREFIX)\" -ifeq ($(SUPPORT_LD_DEBUG),y) -CFLAGS-libdl += -D__SUPPORT_LD_DEBUG__ -endif +CFLAGS-$(SUPPORT_LD_DEBUG)-ldso/libdl := -D__SUPPORT_LD_DEBUG__ CFLAGS-libdl.c := -DLDSO_ELFINTERP=\"$(TARGET_ARCH)/elfinterp.c\" -LDFLAGS-libdl.so := $(LDFLAGS) -Wl,-fini,dl_cleanup +LDFLAGS-$(UCLIBC_FORMAT_DSBT_ELF)-libdl.so := -Wl,--dsbt-index=3 +LDFLAGS-libdl.so := $(LDFLAGS) + +ifeq ($(LDSO_NO_CLEANUP),) +LDFLAGS-libdl.so += -Wl,-fini,$(SYMBOL_PREFIX)dl_cleanup +endif LIBS-libdl.so := $(LIBS) $(ldso) @@ -39,10 +44,10 @@ libdl-so-y := $(libdl_OUT)/libdl.oS lib-a-$(HAVE_SHARED) += $(top_builddir)lib/libdl.a lib-so-y += $(top_builddir)lib/libdl.so -objclean-y += libdl_clean +objclean-y += CLEAN_ldso/libdl $(top_builddir)lib/libdl.so: $(libdl_OUT)/libdl_so.a $(libc.depend) - $(call link.so,$(libdl_FULL_NAME),$(MAJOR_VERSION)) + $(call link.so,$(libdl_FULL_NAME),$(ABI_VERSION)) $(libdl_OUT)/libdl_so.a: $(libdl-so-y) $(Q)$(RM) $@ @@ -53,5 +58,5 @@ $(top_builddir)lib/libdl.a: $(libdl-a-y) $(Q)$(RM) $@ $(do_ar) -libdl_clean: - $(RM) $(libdl_OUT)/*.{o,os,a,oS} +CLEAN_ldso/libdl: + $(do_rm) $(addprefix $(libdl_OUT)/*., o os oS a) diff --git a/ldso/libdl/libdl.c b/ldso/libdl/libdl.c index 9681e581b..42a09a8bb 100644 --- a/ldso/libdl/libdl.c +++ b/ldso/libdl/libdl.c @@ -32,24 +32,41 @@ #include <ldso.h> #include <stdio.h> -#include <string.h> /* Needed for 'strstr' prototype' */ +#include <string.h> #include <stdbool.h> +#include <bits/uClibc_mutex.h> +#ifdef __UCLIBC_HAS_TLS__ +#include <tls.h> +#endif + +#if defined(USE_TLS) && USE_TLS +#include <ldsodefs.h> +#include <dl-tls.h> +extern void _dl_add_to_slotinfo(struct link_map *l); +#endif + +/* TODO: get rid of global lock and use more finegrained locking, or + * perhaps RCU for the global structures */ +__UCLIBC_MUTEX_STATIC(_dl_mutex, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP); #ifdef SHARED +# if defined(USE_TLS) && USE_TLS +extern struct link_map *_dl_update_slotinfo(unsigned long int req_modid); +# endif /* When libdl is loaded as a shared library, we need to load in * and use a pile of symbols from ldso... 
*/ - -extern char *_dl_find_hash(const char *, struct dyn_elf *, struct elf_resolve *, int); -extern struct elf_resolve * _dl_load_shared_library(int, struct dyn_elf **, - struct elf_resolve *, char *, int); -extern int _dl_fixup(struct dyn_elf *rpnt, int lazy); +#include <dl-elf.h> +#if 0 +extern int _dl_fixup(struct dyn_elf *rpnt, struct r_scope_elem *scope, int lazy); extern void _dl_protect_relro(struct elf_resolve * tpnt); +#endif extern int _dl_errno; extern struct dyn_elf *_dl_symbol_tables; extern struct dyn_elf *_dl_handles; extern struct elf_resolve *_dl_loaded_modules; +extern void _dl_free (void *__ptr); extern struct r_debug *_dl_debug_addr; extern unsigned long _dl_error_number; extern void *(*_dl_malloc_function)(size_t); @@ -67,8 +84,7 @@ extern void _dl_perform_mips_global_got_relocations(struct elf_resolve *tpnt, in extern char *_dl_debug; #endif - -#else /* SHARED */ +#else /* !SHARED */ #define _dl_malloc malloc #define _dl_free free @@ -77,20 +93,21 @@ extern char *_dl_debug; * the symbols that otherwise would have been loaded in from ldso... */ #ifdef __SUPPORT_LD_DEBUG__ -char *_dl_debug = 0; -char *_dl_debug_symbols = 0; -char *_dl_debug_move = 0; -char *_dl_debug_reloc = 0; -char *_dl_debug_detail = 0; -char *_dl_debug_nofixups = 0; -char *_dl_debug_bindings = 0; +char *_dl_debug = NULL; +char *_dl_debug_symbols = NULL; +char *_dl_debug_move = NULL; +char *_dl_debug_reloc = NULL; +char *_dl_debug_detail = NULL; +char *_dl_debug_nofixups = NULL; +char *_dl_debug_bindings = NULL; int _dl_debug_file = 2; #endif const char *_dl_progname = ""; /* Program name */ void *(*_dl_malloc_function)(size_t); void (*_dl_free_function) (void *p); -char *_dl_library_path = 0; /* Where we look for libraries */ -char *_dl_ldsopath = 0; /* Location of the shared lib loader */ +#ifdef __LDSO_LD_LIBRARY_PATH__ +char *_dl_library_path = NULL; /* Where we look for libraries */ +#endif int _dl_errno = 0; /* We can't use the real errno in ldso */ size_t _dl_pagesize = PAGE_SIZE; /* Store the page size for use later */ /* This global variable is also to communicate with debuggers such as gdb. */ @@ -98,6 +115,15 @@ struct r_debug *_dl_debug_addr = NULL; #include "../ldso/dl-array.c" #include "../ldso/dl-debug.c" + + +# if defined(USE_TLS) && USE_TLS +/* + * Giving this initialized value preallocates some surplus bytes in the + * static TLS area, see __libc_setup_tls (libc-tls.c). + */ +size_t _dl_tls_static_size = 2048; +# endif #include LDSO_ELFINTERP #include "../ldso/dl-hash.c" #define _dl_trace_loaded_objects 0 @@ -108,7 +134,7 @@ struct r_debug *_dl_debug_addr = NULL; # define _dl_if_debug_print(fmt, args...) \ do { \ if (_dl_debug) \ - fprintf(stderr, "%s():%i: " fmt, __FUNCTION__, __LINE__, ## args); \ + fprintf(stderr, "%s():%i: " fmt, __func__, __LINE__, ## args); \ } while (0) #else # define _dl_if_debug_print(fmt, args...) 
@@ -117,7 +143,7 @@ struct r_debug *_dl_debug_addr = NULL; static int do_dlclose(void *, int need_fini); -static const char *dl_error_names[] = { +static const char *const dl_error_names[] = { "", "File not found", "Unable to open /dev/zero", @@ -134,6 +160,7 @@ static const char *dl_error_names[] = { "Not an ELF shared library", "Unable to mmap file", "No dynamic section", + "Library contains unsupported TLS", #ifdef ELF_USES_RELOCA "Unable to process REL relocs", #else @@ -143,20 +170,134 @@ static const char *dl_error_names[] = { "Unable to resolve symbol" }; -void dl_cleanup(void) __attribute__ ((destructor)); + +#if defined(USE_TLS) && USE_TLS +#ifdef SHARED +/* + * Systems which do not have tls_index also probably have to define + * DONT_USE_TLS_INDEX. + */ + +# ifndef __TLS_GET_ADDR +# define __TLS_GET_ADDR __tls_get_addr +# endif + +/* + * Return the symbol address given the map of the module it is in and + * the symbol record. This is used in dl-sym.c. + */ +static void * +internal_function +_dl_tls_symaddr(struct link_map *map, const Elf32_Addr st_value) +{ +# ifndef DONT_USE_TLS_INDEX + tls_index tmp = + { + .ti_module = map->l_tls_modid, + .ti_offset = st_value + }; + + return __TLS_GET_ADDR (&tmp); +# else + return __TLS_GET_ADDR (map->l_tls_modid, st_value); +# endif +} +#endif + +/* Returns true when a non-empty entry was found. */ +static bool +remove_slotinfo(size_t idx, struct dtv_slotinfo_list *listp, size_t disp, + bool should_be_there) +{ + if (idx - disp >= listp->len) { + if (listp->next == NULL) { + /* + * The index is not actually valid in the slotinfo list, + * because this object was closed before it was fully set + * up due to some error. + */ + _dl_assert(!should_be_there); + } else { + if (remove_slotinfo(idx, listp->next, disp + listp->len, + should_be_there)) + return true; + + /* + * No non-empty entry. Search from the end of this element's + * slotinfo array. + */ + idx = disp + listp->len; + } + } else { + struct link_map *old_map = listp->slotinfo[idx - disp].map; + + /* + * The entry might still be in its unused state if we are + * closing an object that wasn't fully set up. + */ + if (__builtin_expect(old_map != NULL, 1)) { + _dl_assert(old_map->l_tls_modid == idx); + + /* Mark the entry as unused. */ + listp->slotinfo[idx - disp].gen = _dl_tls_generation + 1; + listp->slotinfo[idx - disp].map = NULL; + } + + /* + * If this is not the last currently used entry no need to + * look further. + */ + if (idx != _dl_tls_max_dtv_idx) + return true; + } + + while (idx - disp > (disp == 0 ? 1 + _dl_tls_static_nelem : 0)) { + --idx; + + if (listp->slotinfo[idx - disp].map != NULL) { + /* Found a new last used index. */ + _dl_tls_max_dtv_idx = idx; + return true; + } + } + + /* No non-entry in this list element. */ + return false; +} +#endif + +#ifndef __LDSO_NO_CLEANUP__ +void dl_cleanup(void) attribute_hidden __attribute__ ((destructor)); void dl_cleanup(void) { - struct dyn_elf *d; - for (d = _dl_handles; d; d = d->next_handle) { - do_dlclose(d, 1); + struct dyn_elf *h, *n; + + for (h = _dl_handles; h; h = n) { + n = h->next_handle; + do_dlclose(h, 1); } } +#endif -void *dlopen(const char *libname, int flag) +static ptrdiff_t _dl_build_local_scope (struct elf_resolve **list, + struct elf_resolve *map) +{ + struct elf_resolve **p = list; + struct init_fini_list *q; + + *p++ = map; + map->init_flag |= DL_RESERVED; + if (map->init_fini) + for (q = map->init_fini; q; q = q->next) + if (! 
(q->tpnt->init_flag & DL_RESERVED)) + p += _dl_build_local_scope (p, q->tpnt); + return p - list; +} + +static void *do_dlopen(const char *libname, int flag, ElfW(Addr) from) { struct elf_resolve *tpnt, *tfrom; struct dyn_elf *dyn_chain, *rpnt = NULL, *dyn_ptr, *relro_ptr, *handle; - ElfW(Addr) from; struct elf_resolve *tpnt1; void (*dl_brk) (void); int now_flag; @@ -164,15 +305,20 @@ void *dlopen(const char *libname, int flag) unsigned int nlist, i; struct elf_resolve **init_fini_list; static bool _dl_init; + struct elf_resolve **local_scope; +#ifdef SHARED + struct r_scope_elem *ls; +#endif +#if defined(USE_TLS) && USE_TLS + bool any_tls = false; +#endif /* A bit of sanity checking... */ - if (!(flag & (RTLD_LAZY|RTLD_NOW))) { + if (!(flag & (RTLD_LAZY|RTLD_NOW|RTLD_NOLOAD))) { _dl_error_number = LD_BAD_HANDLE; return NULL; } - from = (ElfW(Addr)) __builtin_return_address(0); - if (!_dl_init) { _dl_init = true; _dl_malloc_function = malloc; @@ -186,7 +332,7 @@ void *dlopen(const char *libname, int flag) # ifdef __SUPPORT_LD_DEBUG__ _dl_debug = getenv("LD_DEBUG"); if (_dl_debug) { - if (_dl_strstr(_dl_debug, "all")) { + if (strstr(_dl_debug, "all")) { _dl_debug_detail = _dl_debug_move = _dl_debug_symbols = _dl_debug_reloc = _dl_debug_bindings = _dl_debug_nofixups = (void*)1; } else { @@ -220,14 +366,15 @@ void *dlopen(const char *libname, int flag) tfrom = tpnt; } } - for (rpnt = _dl_symbol_tables; rpnt && rpnt->next; rpnt=rpnt->next); + for (rpnt = _dl_symbol_tables; rpnt && rpnt->next; rpnt = rpnt->next) + continue; relro_ptr = rpnt; now_flag = (flag & RTLD_NOW) ? RTLD_NOW : 0; if (getenv("LD_BIND_NOW")) now_flag = RTLD_NOW; -#ifndef SHARED +#if !defined SHARED && defined __LDSO_LD_LIBRARY_PATH__ /* When statically linked, the _dl_library_path is not yet initialized */ _dl_library_path = getenv("LD_LIBRARY_PATH"); #endif @@ -235,16 +382,16 @@ void *dlopen(const char *libname, int flag) /* Try to load the specified library */ _dl_if_debug_print("Trying to dlopen '%s', RTLD_GLOBAL:%d RTLD_NOW:%d\n", (char*)libname, (flag & RTLD_GLOBAL ? 1:0), (now_flag & RTLD_NOW ? 
1:0)); - tpnt = _dl_load_shared_library(0, &rpnt, tfrom, (char*)libname, 0); + tpnt = _dl_load_shared_library(flag & (RTLD_NOLOAD | RTLD_GLOBAL | RTLD_NODELETE), + &rpnt, tfrom, (char*)libname, 0); if (tpnt == NULL) { _dl_unmap_cache(); return NULL; } dyn_chain = (struct dyn_elf *) malloc(sizeof(struct dyn_elf)); - _dl_memset(dyn_chain, 0, sizeof(struct dyn_elf)); + memset(dyn_chain, 0, sizeof(struct dyn_elf)); dyn_chain->dyn = tpnt; - tpnt->rtld_flags |= (flag & RTLD_GLOBAL); dyn_chain->next_handle = _dl_handles; _dl_handles = dyn_ptr = dyn_chain; @@ -257,24 +404,23 @@ void *dlopen(const char *libname, int flag) dyn_chain->init_fini.init_fini = handle->init_fini.init_fini; dyn_chain->init_fini.nlist = handle->init_fini.nlist; for (i = 0; i < dyn_chain->init_fini.nlist; i++) - dyn_chain->init_fini.init_fini[i]->rtld_flags |= (flag & RTLD_GLOBAL); + dyn_chain->init_fini.init_fini[i]->rtld_flags |= (flag & (RTLD_GLOBAL|RTLD_NODELETE)); dyn_chain->next = handle->next; break; } } return dyn_chain; - } else { - tpnt->init_flag |= DL_OPENED; } + tpnt->init_flag |= DL_OPENED; + _dl_if_debug_print("Looking for needed libraries\n"); nlist = 0; runp = alloca(sizeof(*runp)); runp->tpnt = tpnt; runp->next = NULL; dep_list = runp2 = runp; - for (; runp; runp = runp->next) - { + for (; runp; runp = runp->next) { ElfW(Dyn) *dpnt; char *lpntstr; @@ -286,15 +432,14 @@ void *dlopen(const char *libname, int flag) dpnt->d_un.d_val); _dl_if_debug_print("Trying to load '%s', needed by '%s'\n", lpntstr, runp->tpnt->libname); - tpnt1 = _dl_load_shared_library(0, &rpnt, runp->tpnt, lpntstr, 0); + tpnt1 = _dl_load_shared_library(flag & (RTLD_GLOBAL | RTLD_NODELETE), + &rpnt, runp->tpnt, lpntstr, 0); if (!tpnt1) goto oops; - tpnt1->rtld_flags |= (flag & RTLD_GLOBAL); - /* This list is for dlsym() and relocation */ dyn_ptr->next = (struct dyn_elf *) malloc(sizeof(struct dyn_elf)); - _dl_memset (dyn_ptr->next, 0, sizeof (struct dyn_elf)); + memset (dyn_ptr->next, 0, sizeof (struct dyn_elf)); dyn_ptr = dyn_ptr->next; dyn_ptr->dyn = tpnt1; /* Used to record RTLD_LOCAL scope */ @@ -336,6 +481,23 @@ void *dlopen(const char *libname, int flag) } } + /* Build the local scope for the dynamically loaded modules. */ + local_scope = _dl_malloc(nlist * sizeof(struct elf_resolve *)); /* Could it allocated on stack? */ + for (i = 0; i < nlist; i++) + if (init_fini_list[i]->symbol_scope.r_nlist == 0) { + int k, cnt; + cnt = _dl_build_local_scope(local_scope, init_fini_list[i]); + init_fini_list[i]->symbol_scope.r_list = _dl_malloc(cnt * sizeof(struct elf_resolve *)); + init_fini_list[i]->symbol_scope.r_nlist = cnt; + _dl_memcpy (init_fini_list[i]->symbol_scope.r_list, local_scope, + cnt * sizeof (struct elf_resolve *)); + /* Restoring the init_flag.*/ + for (k = 0; k < nlist; k++) + init_fini_list[k]->init_flag &= ~DL_RESERVED; + } + + _dl_free(local_scope); + /* Sort the INIT/FINI list in dependency order. */ for (runp2 = dep_list; runp2; runp2 = runp2->next) { unsigned int j, k; @@ -364,8 +526,8 @@ void *dlopen(const char *libname, int flag) fprintf(stderr, "lib: %s has deps:\n", init_fini_list[i]->libname); runp = init_fini_list[i]->init_fini; for (; runp; runp = runp->next) - printf(" %s ", runp->tpnt->libname); - printf("\n"); + fprintf(stderr, " %s ", runp->tpnt->libname); + fprintf(stderr, "\n"); } } #endif @@ -376,6 +538,19 @@ void *dlopen(const char *libname, int flag) * Now we go through and look for REL and RELA records that indicate fixups * to the GOT tables. 
We need to do this in reverse order so that COPY * directives work correctly */ + +#ifdef SHARED + /* + * Get the tail of the list. + * In the static case doesn't need to extend the global scope, it is + * ready to be used as it is, because _dl_loaded_modules already points + * to the dlopened library. + */ + for (ls = &_dl_loaded_modules->symbol_scope; ls && ls->next; ls = ls->next); + + /* Extend the global scope by adding the local scope of the dlopened DSO. */ + ls->next = &dyn_chain->dyn->symbol_scope; +#endif #ifdef __mips__ /* * Relocation of the GOT entries for MIPS have to be done @@ -384,7 +559,7 @@ void *dlopen(const char *libname, int flag) _dl_perform_mips_global_got_relocations(tpnt, !now_flag); #endif - if (_dl_fixup(dyn_chain, now_flag)) + if (_dl_fixup(dyn_chain, &_dl_loaded_modules->symbol_scope, now_flag)) goto oops; if (relro_ptr) { @@ -396,6 +571,51 @@ void *dlopen(const char *libname, int flag) /* TODO: Should we set the protections of all pages back to R/O now ? */ +#if defined(USE_TLS) && USE_TLS + + for (i=0; i < nlist; i++) { + struct elf_resolve *tmp_tpnt = init_fini_list[i]; + /* Only add TLS memory if this object is loaded now and + therefore is not yet initialized. */ + + if (!(tmp_tpnt->init_flag & INIT_FUNCS_CALLED) + /* Only if the module defines thread local data. */ + && __builtin_expect (tmp_tpnt->l_tls_blocksize > 0, 0)) { + + /* Now that we know the object is loaded successfully add + modules containing TLS data to the slot info table. We + might have to increase its size. */ + _dl_add_to_slotinfo ((struct link_map*)tmp_tpnt); + + /* It is the case in which we couldn't perform TLS static + initialization at relocation time, and we delayed it until + the relocation has been completed. */ + + if (tmp_tpnt->l_need_tls_init) { + tmp_tpnt->l_need_tls_init = 0; +# ifdef SHARED + /* Update the slot information data for at least the + generation of the DSO we are allocating data for. */ + _dl_update_slotinfo (tmp_tpnt->l_tls_modid); +# endif + + _dl_init_static_tls((struct link_map*)tmp_tpnt); + _dl_assert (tmp_tpnt->l_need_tls_init == 0); + } + + /* We have to bump the generation counter. */ + any_tls = true; + } + } + + /* Bump the generation number if necessary. */ + if (any_tls && __builtin_expect (++_dl_tls_generation == 0, 0)) { + _dl_debug_early("TLS generation counter wrapped! Please report this."); + _dl_exit(30); + } + +#endif + /* Notify the debugger we have added some objects. */ if (_dl_debug_addr) { dl_brk = (void (*)(void)) _dl_debug_addr->r_brk; @@ -438,23 +658,36 @@ oops: return NULL; } -void *dlsym(void *vhandle, const char *name) +void *dlopen(const char *libname, int flag) +{ + void *ret; + + __UCLIBC_MUTEX_CONDITIONAL_LOCK(_dl_mutex, 1); + ret = do_dlopen(libname, flag, + (ElfW(Addr)) __builtin_return_address(0)); + __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(_dl_mutex, 1); + + return ret; +} + +static void *do_dlsym(void *vhandle, const char *name, void *caller_address) { struct elf_resolve *tpnt, *tfrom; struct dyn_elf *handle; - ElfW(Addr) from; + ElfW(Addr) from = 0; struct dyn_elf *rpnt; void *ret; + struct symbol_ref sym_ref = { NULL, NULL }; /* Nastiness to support underscore prefixes. 
*/ #ifdef __UCLIBC_UNDERSCORES__ char tmp_buf[80]; char *name2 = tmp_buf; size_t nlen = strlen (name) + 1; if (nlen + 1 > sizeof (tmp_buf)) - name2 = malloc (nlen + 1); + name2 = malloc (nlen + 1); if (name2 == 0) { - _dl_error_number = LD_ERROR_MMAP_FAILED; - return 0; + _dl_error_number = LD_ERROR_MMAP_FAILED; + return 0; } name2[0] = '_'; memcpy (name2 + 1, name, nlen); @@ -485,7 +718,7 @@ void *dlsym(void *vhandle, const char *name) * dynamic loader itself, as it doesn't know * how to properly treat it. */ - from = (ElfW(Addr)) __builtin_return_address(0); + from = (ElfW(Addr)) caller_address; tfrom = NULL; for (rpnt = _dl_symbol_tables; rpnt; rpnt = rpnt->next) { @@ -498,8 +731,22 @@ void *dlsym(void *vhandle, const char *name) } tpnt = NULL; if (handle == _dl_symbol_tables) - tpnt = handle->dyn; /* Only search RTLD_GLOBAL objs if global object */ - ret = _dl_find_hash(name2, handle, tpnt, ELF_RTYPE_CLASS_DLSYM); + tpnt = handle->dyn; /* Only search RTLD_GLOBAL objs if global object */ + + do { + ret = _dl_find_hash(name2, &handle->dyn->symbol_scope, tpnt, ELF_RTYPE_CLASS_DLSYM, &sym_ref); + if (ret != NULL) + break; + handle = handle->next; + } while (from && handle); + +#if defined(USE_TLS) && USE_TLS && defined SHARED + if (sym_ref.sym && (ELF_ST_TYPE(sym_ref.sym->st_info) == STT_TLS) && (sym_ref.tpnt)) { + /* The found symbol is a thread-local storage variable. + Return its address for the current thread. */ + ret = _dl_tls_symaddr ((struct link_map *)sym_ref.tpnt, (Elf32_Addr)ret); + } +#endif /* * Nothing found. @@ -514,6 +761,17 @@ out: return ret; } +void *dlsym(void *vhandle, const char *name) +{ + void *ret; + + __UCLIBC_MUTEX_CONDITIONAL_LOCK(_dl_mutex, 1); + ret = do_dlsym(vhandle, name, __builtin_return_address(0)); + __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(_dl_mutex, 1); + + return ret; +} + #if 0 void *dlvsym(void *vhandle, const char *name, const char *version) { @@ -530,8 +788,17 @@ static int do_dlclose(void *vhandle, int need_fini) int (*dl_elf_fini) (void); void (*dl_brk) (void); struct dyn_elf *handle; - unsigned int end; + unsigned int end = 0, start = 0xffffffff; unsigned int i, j; + struct r_scope_elem *ls, *ls_next = NULL; + struct elf_resolve **handle_rlist; + +#if defined(USE_TLS) && USE_TLS + bool any_tls = false; + size_t tls_free_start = NO_TLS_OFFSET; + size_t tls_free_end = NO_TLS_OFFSET; + struct link_map *tls_lmap; +#endif handle = (struct dyn_elf *) vhandle; if (handle == _dl_symbol_tables) @@ -553,19 +820,34 @@ static int do_dlclose(void *vhandle, int need_fini) _dl_handles = rpnt->next_handle; _dl_if_debug_print("%s: usage count: %d\n", handle->dyn->libname, handle->dyn->usage_count); - if (handle->dyn->usage_count != 1) { + if (handle->dyn->usage_count != 1 || (handle->dyn->rtld_flags & RTLD_NODELETE)) { handle->dyn->usage_count--; free(handle); return 0; } + + /* Store the handle's local scope array for later removal */ + handle_rlist = handle->dyn->symbol_scope.r_list; + + /* Store references to the local scope entries for later removal */ + for (ls = &_dl_loaded_modules->symbol_scope; ls && ls->next; ls = ls->next) + if (ls->next->r_list[0] == handle->dyn) { + break; + } + /* ls points to the previous local symbol scope */ + if(ls && ls->next) + ls_next = ls->next->next; + /* OK, this is a valid handle - now close out the file */ for (j = 0; j < handle->init_fini.nlist; ++j) { tpnt = handle->init_fini.init_fini[j]; - if (--tpnt->usage_count == 0) { + tpnt->usage_count--; + if (tpnt->usage_count == 0 && !(tpnt->rtld_flags & RTLD_NODELETE)) { if 
((tpnt->dynamic_info[DT_FINI] || tpnt->dynamic_info[DT_FINI_ARRAY]) - && need_fini && - !(tpnt->init_flag & FINI_FUNCS_CALLED)) { + && need_fini + && !(tpnt->init_flag & FINI_FUNCS_CALLED) + ) { tpnt->init_flag |= FINI_FUNCS_CALLED; _dl_run_fini_array(tpnt); @@ -583,10 +865,117 @@ static int do_dlclose(void *vhandle, int need_fini) i < tpnt->n_phent; ppnt++, i++) { if (ppnt->p_type != PT_LOAD) continue; + if (ppnt->p_vaddr < start) + start = ppnt->p_vaddr; if (end < ppnt->p_vaddr + ppnt->p_memsz) end = ppnt->p_vaddr + ppnt->p_memsz; } - DL_LIB_UNMAP (tpnt, end); + +#if defined(USE_TLS) && USE_TLS + /* Do the cast to make things easy. */ + tls_lmap = (struct link_map *) tpnt; + + /* Remove the object from the dtv slotinfo array if it uses TLS. */ + if (__builtin_expect (tls_lmap->l_tls_blocksize > 0, 0)) { + any_tls = true; + + if (_dl_tls_dtv_slotinfo_list != NULL + && ! remove_slotinfo (tls_lmap->l_tls_modid, + _dl_tls_dtv_slotinfo_list, 0, + (tpnt->init_flag & INIT_FUNCS_CALLED))) + /* All dynamically loaded modules with TLS are unloaded. */ + _dl_tls_max_dtv_idx = _dl_tls_static_nelem; + + if (tls_lmap->l_tls_offset != NO_TLS_OFFSET) { + /* + * Collect a contiguous chunk built from the objects in + * this search list, going in either direction. When the + * whole chunk is at the end of the used area then we can + * reclaim it. + */ +# if defined(TLS_TCB_AT_TP) + if (tls_free_start == NO_TLS_OFFSET + || (size_t) tls_lmap->l_tls_offset == tls_free_start) { + /* Extend the contiguous chunk being reclaimed. */ + tls_free_start + = tls_lmap->l_tls_offset - + tls_lmap->l_tls_blocksize; + + if (tls_free_end == NO_TLS_OFFSET) + tls_free_end = tls_lmap->l_tls_offset; + } else if (tls_lmap->l_tls_offset - tls_lmap->l_tls_blocksize + == tls_free_end) + /* Extend the chunk backwards. */ + tls_free_end = tls_lmap->l_tls_offset; + else { + /* + * This isn't contiguous with the last chunk freed. + * One of them will be leaked unless we can free + * one block right away. + */ + if (tls_free_end == _dl_tls_static_used) { + _dl_tls_static_used = tls_free_start; + tls_free_end = tls_lmap->l_tls_offset; + tls_free_start + = tls_free_end - tls_lmap->l_tls_blocksize; + } else if ((size_t) tls_lmap->l_tls_offset + == _dl_tls_static_used) + _dl_tls_static_used = tls_lmap->l_tls_offset - + tls_lmap->l_tls_blocksize; + else if (tls_free_end < (size_t) tls_lmap->l_tls_offset) { + /* + * We pick the later block. It has a chance + * to be freed. + */ + tls_free_end = tls_lmap->l_tls_offset; + tls_free_start = tls_free_end - + tls_lmap->l_tls_blocksize; + } + } +# elif defined(TLS_DTV_AT_TP) + if ((size_t) tls_lmap->l_tls_offset == tls_free_end) + /* Extend the contiguous chunk being reclaimed. */ + tls_free_end -= tls_lmap->l_tls_blocksize; + else if (tls_lmap->l_tls_offset + tls_lmap->l_tls_blocksize + == tls_free_start) + /* Extend the chunk backwards. */ + tls_free_start = tls_lmap->l_tls_offset; + else { + /* + * This isn't contiguous with the last chunk + * freed. One of them will be leaked. 
+ */ + if (tls_free_end == _dl_tls_static_used) + _dl_tls_static_used = tls_free_start; + tls_free_start = tls_lmap->l_tls_offset; + tls_free_end = tls_free_start + + tls_lmap->l_tls_blocksize; + } +# else +# error Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined +# endif + } else { + +#define TLS_DTV_UNALLOCATED ((void *) -1l) + + dtv_t *dtv = THREAD_DTV (); + + if (!(dtv[tls_lmap->l_tls_modid].pointer.is_static) && + dtv[tls_lmap->l_tls_modid].pointer.val != TLS_DTV_UNALLOCATED) { + /* Note that free is called for NULL is well. We + deallocate even if it is this dtv entry we are + supposed to load. The reason is that we call + memalign and not malloc. */ + _dl_free (dtv[tls_lmap->l_tls_modid].pointer.val); + dtv[tls_lmap->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED; + } + } + } +#endif + + end = (end + ADDR_ALIGN) & PAGE_ALIGN; + start = start & ~ADDR_ALIGN; + DL_LIB_UNMAP (tpnt, end - start); /* Free elements in RTLD_LOCAL scope list */ for (runp = tpnt->rtld_local; runp; runp = tmp) { tmp = runp->next; @@ -598,8 +987,8 @@ static int do_dlclose(void *vhandle, int need_fini) _dl_loaded_modules = tpnt->next; if (_dl_loaded_modules) _dl_loaded_modules->prev = 0; - } else - for (run_tpnt = _dl_loaded_modules; run_tpnt; run_tpnt = run_tpnt->next) + } else { + for (run_tpnt = _dl_loaded_modules; run_tpnt; run_tpnt = run_tpnt->next) { if (run_tpnt->next == tpnt) { _dl_if_debug_print("removing loaded_modules: %s\n", tpnt->libname); run_tpnt->next = run_tpnt->next->next; @@ -607,6 +996,8 @@ static int do_dlclose(void *vhandle, int need_fini) run_tpnt->next->prev = run_tpnt; break; } + } + } /* Next, remove tpnt from the global symbol table list */ if (_dl_symbol_tables) { @@ -614,7 +1005,7 @@ static int do_dlclose(void *vhandle, int need_fini) _dl_symbol_tables = _dl_symbol_tables->next; if (_dl_symbol_tables) _dl_symbol_tables->prev = 0; - } else + } else { for (rpnt1 = _dl_symbol_tables; rpnt1->next; rpnt1 = rpnt1->next) { if (rpnt1->next->dyn == tpnt) { _dl_if_debug_print("removing symbol_tables: %s\n", tpnt->libname); @@ -626,14 +1017,38 @@ static int do_dlclose(void *vhandle, int need_fini) break; } } + } } free(tpnt->libname); + if (handle->dyn != tpnt) + free(tpnt->symbol_scope.r_list); free(tpnt); } } + /* Unlink and release the handle's local scope from global one */ + if(ls) + ls->next = ls_next; + free(handle_rlist); + + for (rpnt1 = handle->next; rpnt1; rpnt1 = rpnt1_tmp) { + rpnt1_tmp = rpnt1->next; + free(rpnt1); + } free(handle->init_fini.init_fini); free(handle); +#if defined(USE_TLS) && USE_TLS + /* If we removed any object which uses TLS bump the generation counter. */ + if (any_tls) { + if (__builtin_expect(++_dl_tls_generation == 0, 0)) { + _dl_debug_early("TLS generation counter wrapped! 
Please report to the uClibc mailing list.\n"); + _dl_exit(30); + } + + if (tls_free_end == _dl_tls_static_used) + _dl_tls_static_used = tls_free_start; + } +#endif if (_dl_debug_addr) { dl_brk = (void (*)(void)) _dl_debug_addr->r_brk; @@ -651,7 +1066,13 @@ static int do_dlclose(void *vhandle, int need_fini) int dlclose(void *vhandle) { - return do_dlclose(vhandle, 1); + int ret; + + __UCLIBC_MUTEX_CONDITIONAL_LOCK(_dl_mutex, 1); + ret = do_dlclose(vhandle, 1); + __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(_dl_mutex, 1); + + return ret; } char *dlerror(void) @@ -669,8 +1090,10 @@ char *dlerror(void) * Dump information to stderr about the current loaded modules */ #ifdef __USE_GNU -static char *type[] = { "Lib", "Exe", "Int", "Mod" }; +# if 0 +static const char type[][4] = { "Lib", "Exe", "Int", "Mod" }; +/* reimplement this, being a GNU extension it should be the same as on glibc */ int dlinfo(void) { struct elf_resolve *tpnt; @@ -697,8 +1120,9 @@ int dlinfo(void) } return 0; } +#endif -int dladdr(const void *__address, Dl_info * __info) +static int do_dladdr(const void *__address, Dl_info * __info) { struct elf_resolve *pelf; struct elf_resolve *rpnt; @@ -761,7 +1185,11 @@ int dladdr(const void *__address, Dl_info * __info) ElfW(Addr) symbol_addr; symbol_addr = (ElfW(Addr)) DL_RELOC_ADDR(pelf->loadaddr, symtab[si].st_value); - if (symbol_addr <= (ElfW(Addr))__address && (!sf || sa < symbol_addr)) { + if ((symtab[si].st_shndx != SHN_UNDEF + || symtab[si].st_value != 0) + && ELF_ST_TYPE(symtab[si].st_info) != STT_TLS + && DL_ADDR_SYM_MATCH(symbol_addr, &symtab[si], sa, + (ElfW(Addr)) __address)) { sa = symbol_addr; sn = si; sf = 1; @@ -777,7 +1205,11 @@ int dladdr(const void *__address, Dl_info * __info) ElfW(Addr) symbol_addr; symbol_addr = (ElfW(Addr)) DL_RELOC_ADDR(pelf->loadaddr, symtab[si].st_value); - if (symbol_addr <= (ElfW(Addr))__address && (!sf || sa < symbol_addr)) { + if ((symtab[si].st_shndx != SHN_UNDEF + || symtab[si].st_value != 0) + && ELF_ST_TYPE(symtab[si].st_info) != STT_TLS + && DL_ADDR_SYM_MATCH(symbol_addr, &symtab[si], sa, + (ElfW(Addr)) __address)) { sa = symbol_addr; sn = si; sf = 1; @@ -802,3 +1234,14 @@ int dladdr(const void *__address, Dl_info * __info) } } #endif + +int dladdr(const void *__address, Dl_info * __info) +{ + int ret; + + __UCLIBC_MUTEX_CONDITIONAL_LOCK(_dl_mutex, 1); + ret = do_dladdr(__address, __info); + __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(_dl_mutex, 1); + + return ret; +} diff --git a/ldso/man/dlopen.3 b/ldso/man/dlopen.3 index 8d1e09e71..0907aed9c 100644 --- a/ldso/man/dlopen.3 +++ b/ldso/man/dlopen.3 @@ -19,9 +19,8 @@ .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public -.\" License along with this manual; if not, write to the Free -.\" Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, -.\" USA. +.\" License along with this manual; if not, see +.\" <http://www.gnu.org/licenses/>. .\" .TH DLOPEN 3 "16 May 1995" "Linux" "Linux Programmer's Manual" .SH NAME |
