author    Waldemar Brodkorb <wbx@uclibc-ng.org>   2016-10-04 06:51:35 +0200
committer Waldemar Brodkorb <wbx@openadk.org>     2017-02-01 17:48:38 +0100
commit    dba942c80dc2cfa5768a856fff98e22a755fdd27 (patch)
tree      371e775cb6dfec085945f6834aeb9d3b6de36fbe /ldso/ldso
parent    9b457baf8d46329f7d7ee2aa084022bb0df88551 (diff)
add experimental aarch64 support
Ported over from the GNU C Library and runtime-tested in QEMU.
Diffstat (limited to 'ldso/ldso')
-rw-r--r--  ldso/ldso/aarch64/dl-startup.h   |  98
-rw-r--r--  ldso/ldso/aarch64/dl-syscalls.h  |   1
-rw-r--r--  ldso/ldso/aarch64/dl-sysdep.h    | 107
-rw-r--r--  ldso/ldso/aarch64/dl-tlsdesc.S   | 207
-rw-r--r--  ldso/ldso/aarch64/elfinterp.c    | 306
-rw-r--r--  ldso/ldso/aarch64/resolve.S      |  97
6 files changed, 816 insertions, 0 deletions
diff --git a/ldso/ldso/aarch64/dl-startup.h b/ldso/ldso/aarch64/dl-startup.h
new file mode 100644
index 000000000..1fac5ec35
--- /dev/null
+++ b/ldso/ldso/aarch64/dl-startup.h
@@ -0,0 +1,98 @@
+/*
+ * Architecture specific code used by dl-startup.c
+ * Copyright (C) 2016 Waldemar Brodkorb <wbx@uclibc-ng.org>
+ * Ported from GNU libc
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+/* Copyright (C) 1995-2016 Free Software Foundation, Inc.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <features.h>
+
+__asm__("\
+.text \n\
+.globl _start \n\
+.type _start, %function \n\
+.globl _dl_start_user \n\
+.type _dl_start_user, %function \n\
+_start: \n\
+ mov x0, sp \n\
+ bl _dl_start \n\
+ // returns user entry point in x0 \n\
+ mov x21, x0 \n\
+_dl_start_user: \n\
+ // get the original arg count \n\
+ ldr x1, [sp] \n\
+ // get the argv address \n\
+ add x2, sp, #(1<<3) \n\
+ // get _dl_skip_args to see if we were \n\
+ // invoked as an executable \n\
+ adrp x4, _dl_skip_args \n\
+ ldr w4, [x4, #:lo12:_dl_skip_args] \n\
+ // do we need to adjust argc/argv \n\
+ cmp w4, 0 \n\
+ beq .L_done_stack_adjust \n\
+ // subtract _dl_skip_args from original arg count \n\
+ sub x1, x1, x4 \n\
+ // store adjusted argc back to stack \n\
+ str x1, [sp] \n\
+ // find the first unskipped argument \n\
+ mov x3, x2 \n\
+ add x4, x2, x4, lsl #3 \n\
+ // shuffle envp down \n\
+1: ldr x5, [x4], #(1<<3) \n\
+ str x5, [x3], #(1<<3) \n\
+ cmp x5, #0 \n\
+ bne 1b \n\
+ // shuffle auxv down \n\
+1: ldp x0, x5, [x4, #(2<<3)]! \n\
+ stp x0, x5, [x3], #(2<<3) \n\
+ cmp x0, #0 \n\
+ bne 1b \n\
+.L_done_stack_adjust: \n\
+ // compute envp \n\
+ add x3, x2, x1, lsl #3 \n\
+ add x3, x3, #(1<<3) \n\
+ // load the finalizer function \n\
+ adrp x0, _dl_fini \n\
+ add x0, x0, #:lo12:_dl_fini \n\
+	// jump to the user's entry point	\n\
+ br x21 \n\
+");
+
+/* Get a pointer to the argv array.  On many platforms this can be just
+ * the address of the first argument; on other platforms we need to
+ * do something a little more subtle here. */
+#define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*)ARGS)+1)
+
+/* Handle relocation of the symbols in the dynamic loader. */
+static __always_inline
+void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, ElfW(Addr) *reloc_addr,
+ ElfW(Addr) symbol_addr, ElfW(Addr) load_addr, ElfW(Addr) *sym)
+{
+ switch (ELF_R_TYPE(rpnt->r_info)) {
+ case R_AARCH64_NONE:
+ break;
+ case R_AARCH64_ABS64:
+ case R_AARCH64_GLOB_DAT:
+ case R_AARCH64_JUMP_SLOT:
+ *reloc_addr = symbol_addr + rpnt->r_addend;
+ break;
+ default:
+ _dl_exit(1);
+ }
+}
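
As a rough illustration of the relocation rule that PERFORM_BOOTSTRAP_RELOC applies above, the following stand-alone C sketch (illustrative names, not part of the patch) walks a RELA table the same way: slots for ABS64, GLOB_DAT and JUMP_SLOT entries receive the resolved symbol address plus the addend, and any other type is fatal during bootstrap.

#include <elf.h>
#include <stddef.h>
#include <stdlib.h>

/* Hypothetical sketch: apply the subset of AArch64 RELA relocations that
   the bootstrap code handles.  'load_addr' is the run-time load offset and
   'resolve' stands in for the loader's own minimal symbol lookup. */
static void apply_bootstrap_relocs(Elf64_Addr load_addr,
                                   const Elf64_Rela *rela, size_t count,
                                   Elf64_Addr (*resolve)(const Elf64_Rela *))
{
	for (size_t i = 0; i < count; i++) {
		Elf64_Addr *slot = (Elf64_Addr *)(load_addr + rela[i].r_offset);

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_NONE:
			break;
		case R_AARCH64_ABS64:
		case R_AARCH64_GLOB_DAT:
		case R_AARCH64_JUMP_SLOT:
			/* symbol address plus addend, as in the code above */
			*slot = resolve(&rela[i]) + rela[i].r_addend;
			break;
		default:
			exit(1);	/* the real code calls _dl_exit(1) */
		}
	}
}
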
diff --git a/ldso/ldso/aarch64/dl-syscalls.h b/ldso/ldso/aarch64/dl-syscalls.h
new file mode 100644
index 000000000..f40c4fd31
--- /dev/null
+++ b/ldso/ldso/aarch64/dl-syscalls.h
@@ -0,0 +1 @@
+/* stub for arch-specific syscall issues */
diff --git a/ldso/ldso/aarch64/dl-sysdep.h b/ldso/ldso/aarch64/dl-sysdep.h
new file mode 100644
index 000000000..4e8cdd906
--- /dev/null
+++ b/ldso/ldso/aarch64/dl-sysdep.h
@@ -0,0 +1,107 @@
+/*
+ * Various assembly language/system dependent hacks that are required
+ * so that we can minimize the amount of platform specific code.
+ * Copyright (C) 2000-2004 by Erik Andersen <andersen@codepoet.org>
+ * Copyright (C) 2017 by Waldemar Brodkorb <wbx@uclibc-ng.org>
+ * Ported from GNU C Library
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+/* Copyright (C) 1995-2016 Free Software Foundation, Inc.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Define this if the system uses RELOCA. */
+#define ELF_USES_RELOCA
+
+#include <elf.h>
+#include <link.h>
+
+/* Initialization sequence for the GOT. */
+#define INIT_GOT(GOT_BASE,MODULE) \
+{ \
+ GOT_BASE[2] = (unsigned long) _dl_linux_resolve; \
+ GOT_BASE[1] = (unsigned long) MODULE; \
+}
+
+/* Here we define the magic numbers that this dynamic loader should accept */
+#define MAGIC1 EM_AARCH64
+#undef MAGIC2
+
+/* Used for error messages */
+#define ELF_TARGET "aarch64"
+
+struct elf_resolve;
+unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry);
+
+#define elf_machine_type_class(type) \
+ ((((type) == R_AARCH64_JUMP_SLOT \
+ || (type) == R_AARCH64_TLS_DTPMOD \
+ || (type) == R_AARCH64_TLS_DTPREL \
+ || (type) == R_AARCH64_TLS_TPREL \
+ || (type) == R_AARCH64_TLSDESC) * ELF_RTYPE_CLASS_PLT) \
+ | (((type) == R_AARCH64_COPY) * ELF_RTYPE_CLASS_COPY))
+
+/* Return the link-time address of _DYNAMIC. Conveniently, this is the
+ first element of the GOT. */
+extern const ElfW(Addr) _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
+static __always_inline ElfW(Addr) __attribute__ ((unused))
+elf_machine_dynamic (void)
+{
+ return _GLOBAL_OFFSET_TABLE_[0];
+}
+
+/* Return the run-time load address of the shared object. */
+
+static __always_inline ElfW(Addr) __attribute__ ((unused))
+elf_machine_load_address (void)
+{
+ /* To figure out the load address we use the definition that for any symbol:
+ dynamic_addr(symbol) = static_addr(symbol) + load_addr
+
+     The choice of symbol is arbitrary.  We obtain the static address
+     by constructing a non-GOT reference to the symbol, and compute the
+     dynamic address using adrp/add to form the symbol's address
+     relative to the PC.
+     This depends on 32/16-bit relocations being resolved at link time
+     and on the static address fitting in 32/16 bits. */
+
+ ElfW(Addr) static_addr;
+ ElfW(Addr) dynamic_addr;
+
+ __asm__(" \n"
+" adrp %1, _dl_start; \n"
+" add %1, %1, #:lo12:_dl_start \n"
+" ldr %w0, 1f \n"
+" b 2f \n"
+"1: \n"
+" .word _dl_start \n"
+"2: \n"
+ : "=r" (static_addr), "=r" (dynamic_addr));
+ return dynamic_addr - static_addr;
+}
+
+static __always_inline void
+elf_machine_relative(Elf64_Addr load_off, const Elf64_Addr rel_addr,
+ Elf64_Word relative_count)
+{
+ Elf64_Rela *rpnt = (Elf64_Rela*)rel_addr;
+ --rpnt;
+ do {
+ Elf64_Addr *const reloc_addr = (Elf64_Addr*)(load_off + (++rpnt)->r_offset);
+
+ *reloc_addr = load_off + rpnt->r_addend;
+ } while (--relative_count);
+}
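
Both elf_machine_load_address() and elf_machine_relative() above rest on the identity dynamic_addr = static_addr + load_offset. As a hedged illustration (plain C, names not from the patch), the relative-relocation pass boils down to the loop below, which is what the pointer-bumping do/while in elf_machine_relative() implements:

#include <elf.h>

/* Hypothetical sketch: apply 'relative_count' R_AARCH64_RELATIVE entries.
   Each one stores load_off + r_addend into the slot at load_off + r_offset. */
static void apply_relative_relocs(Elf64_Addr load_off,
                                  const Elf64_Rela *rela,
                                  Elf64_Word relative_count)
{
	for (Elf64_Word i = 0; i < relative_count; i++) {
		Elf64_Addr *slot = (Elf64_Addr *)(load_off + rela[i].r_offset);
		*slot = load_off + rela[i].r_addend;
	}
}
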
diff --git a/ldso/ldso/aarch64/dl-tlsdesc.S b/ldso/ldso/aarch64/dl-tlsdesc.S
new file mode 100644
index 000000000..4520da69b
--- /dev/null
+++ b/ldso/ldso/aarch64/dl-tlsdesc.S
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2017 Waldemar Brodkorb <wbx@uclibc-ng.org>
+ * Ported from GNU C Library
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+/* Thread-local storage handling in the ELF dynamic linker.
+ AArch64 version.
+ Copyright (C) 2011-2017 Free Software Foundation, Inc.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#if defined __UCLIBC_HAS_TLS__
+
+#include <tls.h>
+#include "tlsdesc.h"
+
+#define PTR_REG(n) x##n
+#define PTR_LOG_SIZE 3
+#define PTR_SIZE (1<<PTR_LOG_SIZE)
+
+#define NSAVEDQREGPAIRS 16
+#define SAVE_Q_REGISTERS \
+ stp q0, q1, [sp, #-32*NSAVEDQREGPAIRS]!; \
+ cfi_adjust_cfa_offset (32*NSAVEDQREGPAIRS); \
+ stp q2, q3, [sp, #32*1]; \
+ stp q4, q5, [sp, #32*2]; \
+ stp q6, q7, [sp, #32*3]; \
+ stp q8, q9, [sp, #32*4]; \
+ stp q10, q11, [sp, #32*5]; \
+ stp q12, q13, [sp, #32*6]; \
+ stp q14, q15, [sp, #32*7]; \
+ stp q16, q17, [sp, #32*8]; \
+ stp q18, q19, [sp, #32*9]; \
+ stp q20, q21, [sp, #32*10]; \
+ stp q22, q23, [sp, #32*11]; \
+ stp q24, q25, [sp, #32*12]; \
+ stp q26, q27, [sp, #32*13]; \
+ stp q28, q29, [sp, #32*14]; \
+ stp q30, q31, [sp, #32*15];
+
+#define RESTORE_Q_REGISTERS \
+ ldp q2, q3, [sp, #32*1]; \
+ ldp q4, q5, [sp, #32*2]; \
+ ldp q6, q7, [sp, #32*3]; \
+ ldp q8, q9, [sp, #32*4]; \
+ ldp q10, q11, [sp, #32*5]; \
+ ldp q12, q13, [sp, #32*6]; \
+ ldp q14, q15, [sp, #32*7]; \
+ ldp q16, q17, [sp, #32*8]; \
+ ldp q18, q19, [sp, #32*9]; \
+ ldp q20, q21, [sp, #32*10]; \
+ ldp q22, q23, [sp, #32*11]; \
+ ldp q24, q25, [sp, #32*12]; \
+ ldp q26, q27, [sp, #32*13]; \
+ ldp q28, q29, [sp, #32*14]; \
+ ldp q30, q31, [sp, #32*15]; \
+ ldp q0, q1, [sp], #32*NSAVEDQREGPAIRS; \
+ cfi_adjust_cfa_offset (-32*NSAVEDQREGPAIRS);
+
+ .text
+
+ /* Compute the thread pointer offset for symbols in the static
+ TLS block. The offset is the same for all threads.
+ Prototype:
+ _dl_tlsdesc_return (tlsdesc *) ;
+ */
+ .hidden _dl_tlsdesc_return
+ .global _dl_tlsdesc_return
+ .type _dl_tlsdesc_return,%function
+ .align 2
+_dl_tlsdesc_return:
+ ldr x0,[x0,#8]
+ ret
+ .size _dl_tlsdesc_return, .-_dl_tlsdesc_return
+
+#ifdef SHARED
+ /* Handler for dynamic TLS symbols.
+ Prototype:
+ _dl_tlsdesc_dynamic (tlsdesc *) ;
+
+ The second word of the descriptor points to a
+ tlsdesc_dynamic_arg structure.
+
+ Returns the offset between the thread pointer and the
+ object referenced by the argument.
+
+ ptrdiff_t
+ __attribute__ ((__regparm__ (1)))
+ _dl_tlsdesc_dynamic (struct tlsdesc *tdp)
+ {
+ struct tlsdesc_dynamic_arg *td = tdp->arg;
+ dtv_t *dtv = *(dtv_t **)((char *)__thread_pointer + TCBHEAD_DTV);
+ if (__builtin_expect (td->gen_count <= dtv[0].counter
+ && (dtv[td->tlsinfo.ti_module].pointer.val
+ != TLS_DTV_UNALLOCATED),
+ 1))
+ return dtv[td->tlsinfo.ti_module].pointer.val
+ + td->tlsinfo.ti_offset
+ - __thread_pointer;
+
+ return ___tls_get_addr (&td->tlsinfo) - __thread_pointer;
+ }
+ */
+
+ .hidden _dl_tlsdesc_dynamic
+ .global _dl_tlsdesc_dynamic
+ .type _dl_tlsdesc_dynamic,%function
+ cfi_startproc
+ .align 2
+_dl_tlsdesc_dynamic:
+# define NSAVEXREGPAIRS 2
+ stp x29, x30, [sp,#-(32+16*NSAVEXREGPAIRS)]!
+ cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)
+ mov x29, sp
+
+	/* Save just enough registers to support the fast path; if we fall
+	   into the slow path we will save additional registers.  */
+
+ stp x1, x2, [sp, #32+16*0]
+ stp x3, x4, [sp, #32+16*1]
+
+ mrs x4, tpidr_el0
+ /* The ldar here happens after the load from [x0] at the call site
+ (that is generated by the compiler as part of the TLS access ABI),
+ so it reads the same value (this function is the final value of
+ td->entry) and thus it synchronizes with the release store to
+ td->entry in _dl_tlsdesc_resolve_rela_fixup ensuring that the load
+ from [x0,#PTR_SIZE] here happens after the initialization of td->arg. */
+ ldar PTR_REG (zr), [x0]
+ ldr PTR_REG (1), [x0,#TLSDESC_ARG]
+ ldr PTR_REG (0), [x4,#TCBHEAD_DTV]
+ ldr PTR_REG (3), [x1,#TLSDESC_GEN_COUNT]
+ ldr PTR_REG (2), [x0,#DTV_COUNTER]
+ cmp PTR_REG (3), PTR_REG (2)
+ b.hi 2f
+ ldr PTR_REG (2), [x1,#TLSDESC_MODID]
+ add PTR_REG (0), PTR_REG (0), PTR_REG (2), lsl #(PTR_LOG_SIZE + 1)
+ ldr PTR_REG (0), [x0] /* Load val member of DTV entry. */
+ cmp x0, #TLS_DTV_UNALLOCATED
+ b.eq 2f
+ ldr PTR_REG (1), [x1,#TLSDESC_MODOFF]
+ add PTR_REG (0), PTR_REG (0), PTR_REG (1)
+ sub PTR_REG (0), PTR_REG (0), PTR_REG (4)
+1:
+ ldp x1, x2, [sp, #32+16*0]
+ ldp x3, x4, [sp, #32+16*1]
+
+ ldp x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
+ cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
+# undef NSAVEXREGPAIRS
+ ret
+2:
+	/* This is the slow path.  We need to call __tls_get_addr() which
+	   means we need to save and restore all the registers that the
+	   callee will trash.  */
+
+ /* Save the remaining registers that we must treat as caller save. */
+# define NSAVEXREGPAIRS 7
+ stp x5, x6, [sp, #-16*NSAVEXREGPAIRS]!
+ cfi_adjust_cfa_offset (16*NSAVEXREGPAIRS)
+ stp x7, x8, [sp, #16*1]
+ stp x9, x10, [sp, #16*2]
+ stp x11, x12, [sp, #16*3]
+ stp x13, x14, [sp, #16*4]
+ stp x15, x16, [sp, #16*5]
+ stp x17, x18, [sp, #16*6]
+
+ SAVE_Q_REGISTERS
+
+ mov x0, x1
+ bl __tls_get_addr
+
+ mrs x1, tpidr_el0
+ sub PTR_REG (0), PTR_REG (0), PTR_REG (1)
+
+ RESTORE_Q_REGISTERS
+
+ ldp x7, x8, [sp, #16*1]
+ ldp x9, x10, [sp, #16*2]
+ ldp x11, x12, [sp, #16*3]
+ ldp x13, x14, [sp, #16*4]
+ ldp x15, x16, [sp, #16*5]
+ ldp x17, x18, [sp, #16*6]
+ ldp x5, x6, [sp], #16*NSAVEXREGPAIRS
+ cfi_adjust_cfa_offset (-16*NSAVEXREGPAIRS)
+ b 1b
+ cfi_endproc
+ .size _dl_tlsdesc_dynamic, .-_dl_tlsdesc_dynamic
+# undef NSAVEXREGPAIRS
+
+#endif // SHARED
+#endif // __UCLIBC_HAS_TLS__
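
The commented C in _dl_tlsdesc_dynamic above documents the slow path; the sketch below (illustrative types only, the real definitions come from ldso's tlsdesc.h) shows the two-word descriptor that both entry points receive in x0, and why _dl_tlsdesc_return can be a single load:

#include <stddef.h>

/* Hypothetical sketch of a TLS descriptor: a resolver entry point that
   compiler-generated code calls with the descriptor address in x0, plus
   one argument word at offset 8. */
struct tlsdesc_sketch {
	ptrdiff_t (*entry)(struct tlsdesc_sketch *);	/* e.g. _dl_tlsdesc_return */
	void *arg;					/* TP offset, or dynamic arg */
};

/* In the static-TLS case set up by R_AARCH64_TLSDESC in elfinterp.c below,
   'arg' already holds the offset from the thread pointer, so the resolver
   just returns it; that is all "ldr x0, [x0, #8]; ret" does. */
static ptrdiff_t tlsdesc_return_sketch(struct tlsdesc_sketch *td)
{
	return (ptrdiff_t)td->arg;
}
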
diff --git a/ldso/ldso/aarch64/elfinterp.c b/ldso/ldso/aarch64/elfinterp.c
new file mode 100644
index 000000000..879484e16
--- /dev/null
+++ b/ldso/ldso/aarch64/elfinterp.c
@@ -0,0 +1,306 @@
+/* AARCH64 ELF shared library loader support
+ *
+ * Copyright (C) 2001-2004 Erik Andersen
+ * Copyright (C) 2016-2017 Waldemar Brodkorb <wbx@uclibc-ng.org>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. The name of the above contributors may not be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Program to load an ELF binary on a Linux system, and run it.
+   References to symbols in shared libraries can be resolved by either
+   an ELF shared library or a Linux-style shared library. */
+
+#include "ldso.h"
+
+#if defined(USE_TLS) && USE_TLS
+#include "dl-tls.h"
+#include "tlsdeschtab.h"
+#endif
+
+extern int _dl_linux_resolve(void);
+
+unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
+{
+ ELF_RELOC *this_reloc;
+ char *strtab;
+ ElfW(Sym) *symtab;
+ int symtab_index;
+ char *rel_addr;
+ char *new_addr;
+ char **got_addr;
+ ElfW(Addr) instr_addr;
+ char *symname;
+
+ rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL];
+ this_reloc = (ELF_RELOC *)(rel_addr + reloc_entry);
+ symtab_index = ELF_R_SYM(this_reloc->r_info);
+
+ symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB];
+ strtab = (char *)tpnt->dynamic_info[DT_STRTAB];
+ symname = strtab + symtab[symtab_index].st_name;
+
+	/* Address of the PLT GOT entry to fix up */
+ instr_addr = (this_reloc->r_offset + tpnt->loadaddr);
+ got_addr = (char **)instr_addr;
+
+	/* Resolve the symbol to find the new address */
+ new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL);
+ if (unlikely(!new_addr)) {
+ _dl_dprintf(2, "%s: can't resolve symbol '%s'\n", _dl_progname, symname);
+ _dl_exit(1);
+ }
+#if defined (__SUPPORT_LD_DEBUG__)
+ if (_dl_debug_bindings) {
+ _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname);
+ if (_dl_debug_detail) _dl_dprintf(_dl_debug_file,
+ "\tpatched %x ==> %x @ %x", *got_addr, new_addr, got_addr);
+ }
+ if (!_dl_debug_nofixups) {
+ *got_addr = new_addr;
+ }
+#else
+ *got_addr = new_addr;
+#endif
+ return (unsigned long)new_addr;
+}
+
+static int
+_dl_parse(struct elf_resolve *tpnt, struct r_scope_elem *scope,
+ unsigned long rel_addr, unsigned long rel_size,
+ int (*reloc_fnc) (struct elf_resolve *tpnt, struct r_scope_elem *scope,
+ ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab))
+{
+ unsigned int i;
+ char *strtab;
+ ElfW(Sym) *symtab;
+ ELF_RELOC *rpnt;
+ int symtab_index;
+
+ /* Parse the relocation information */
+ rpnt = (ELF_RELOC *)rel_addr;
+ rel_size = rel_size / sizeof(ELF_RELOC);
+
+ symtab = (ElfW(Sym) *)tpnt->dynamic_info[DT_SYMTAB];
+ strtab = (char *)tpnt->dynamic_info[DT_STRTAB];
+
+ for (i = 0; i < rel_size; i++, rpnt++) {
+ int res;
+
+ symtab_index = ELF_R_SYM(rpnt->r_info);
+
+ debug_sym(symtab, strtab, symtab_index);
+ debug_reloc(symtab, strtab, rpnt);
+
+ res = reloc_fnc(tpnt, scope, rpnt, symtab, strtab);
+
+ if (res==0)
+ continue;
+
+ _dl_dprintf(2, "\n%s: ", _dl_progname);
+
+ if (symtab_index)
+ _dl_dprintf(2, "symbol '%s': ",
+ strtab + symtab[symtab_index].st_name);
+
+ if (unlikely(res < 0)) {
+ int reloc_type = ELF_R_TYPE(rpnt->r_info);
+ _dl_dprintf(2, "can't handle reloc type %x\n", reloc_type);
+ _dl_exit(-res);
+ } else if (unlikely(res > 0)) {
+ _dl_dprintf(2, "can't resolve symbol\n");
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+static int
+_dl_do_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope,
+ ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)
+{
+ int reloc_type;
+ int symtab_index;
+ char *symname;
+#if defined USE_TLS && USE_TLS
+ struct elf_resolve *tls_tpnt = NULL;
+#endif
+ struct symbol_ref sym_ref;
+ ElfW(Addr) *reloc_addr;
+ ElfW(Addr) symbol_addr;
+#if defined (__SUPPORT_LD_DEBUG__)
+ ElfW(Addr) old_val;
+#endif
+
+ reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + (unsigned long)rpnt->r_offset);
+ reloc_type = ELF_R_TYPE(rpnt->r_info);
+ symtab_index = ELF_R_SYM(rpnt->r_info);
+ sym_ref.sym = &symtab[symtab_index];
+ sym_ref.tpnt = NULL;
+ symbol_addr = 0;
+ symname = strtab + sym_ref.sym->st_name;
+
+ if (symtab_index) {
+ symbol_addr = (ElfW(Addr))_dl_find_hash(symname, scope, tpnt,
+ elf_machine_type_class(reloc_type), &sym_ref);
+
+ /*
+ * We want to allow undefined references to weak symbols - this might
+ * have been intentional. We should not be linking local symbols
+ * here, so all bases should be covered.
+ */
+ if (unlikely (!symbol_addr &&
+ (ELF_ST_TYPE(symtab[symtab_index].st_info) != STT_TLS) &&
+ (ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK))) {
+ return 1;
+ }
+ if (_dl_trace_prelink) {
+ _dl_debug_lookup (symname, tpnt, &symtab[symtab_index],
+ &sym_ref, elf_machine_type_class(reloc_type));
+ }
+#if defined USE_TLS && USE_TLS
+ tls_tpnt = sym_ref.tpnt;
+#endif
+ } else {
+ /*
+ * Relocs against STN_UNDEF are usually treated as using a
+ * symbol value of zero, and using the module containing the
+ * reloc itself.
+ */
+ symbol_addr = sym_ref.sym->st_value;
+#if defined USE_TLS && USE_TLS
+ tls_tpnt = tpnt;
+#endif
+ }
+
+#if defined (__SUPPORT_LD_DEBUG__)
+ old_val = *reloc_addr;
+#endif
+
+ switch (reloc_type) {
+ case R_AARCH64_NONE:
+ break;
+ case R_AARCH64_ABS64: /* REL_SYMBOLIC */
+ case R_AARCH64_GLOB_DAT: /* REL_GOT */
+ case R_AARCH64_JUMP_SLOT: /* REL_PLT */
+ *reloc_addr = symbol_addr + rpnt->r_addend;
+ break;
+ case R_AARCH64_RELATIVE:
+ *reloc_addr += tpnt->loadaddr + rpnt->r_addend;
+ break;
+ case R_AARCH64_COPY:
+ _dl_memcpy((void *) reloc_addr,
+ (void *) symbol_addr, sym_ref.sym->st_size);
+ break;
+#if defined USE_TLS && USE_TLS
+ case R_AARCH64_TLS_TPREL:
+ CHECK_STATIC_TLS ((struct link_map *) tls_tpnt);
+ *reloc_addr = (symbol_addr + tls_tpnt->l_tls_offset);
+ break;
+ case R_AARCH64_TLSDESC:
+ {
+ struct tlsdesc volatile *td =
+ (struct tlsdesc volatile *)reloc_addr;
+#ifndef SHARED
+ CHECK_STATIC_TLS((struct link_map *) tls_tpnt);
+#else
+ if (!TRY_STATIC_TLS ((struct link_map *) tls_tpnt))
+ {
+ td->arg = _dl_make_tlsdesc_dynamic((struct link_map *) tls_tpnt, symbol_addr);
+ td->entry = _dl_tlsdesc_dynamic;
+ }
+ else
+#endif
+ {
+ td->arg = symbol_addr + tls_tpnt->l_tls_offset;
+ td->entry = _dl_tlsdesc_return;
+ }
+ }
+ break;
+#endif
+ default:
+ return -1; /*call _dl_exit(1) */
+ }
+
+#if defined (__SUPPORT_LD_DEBUG__)
+ if (_dl_debug_reloc && _dl_debug_detail) {
+ _dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x\n",
+ old_val, *reloc_addr, reloc_addr);
+ }
+#endif
+
+ return 0;
+}
+
+static int
+_dl_do_lazy_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope,
+ ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)
+{
+ int reloc_type;
+ ElfW(Addr) *reloc_addr;
+#if defined (__SUPPORT_LD_DEBUG__)
+ ElfW(Addr) old_val;
+#endif
+
+ (void)scope;
+ (void)strtab;
+
+ reloc_addr = (ElfW(Addr)*)(tpnt->loadaddr + rpnt->r_offset);
+ reloc_type = ELF_R_TYPE(rpnt->r_info);
+
+#if defined (__SUPPORT_LD_DEBUG__)
+ old_val = *reloc_addr;
+#endif
+
+ switch (reloc_type) {
+ case R_AARCH64_NONE:
+ break;
+ case R_AARCH64_JUMP_SLOT:
+ *reloc_addr += tpnt->loadaddr;
+ break;
+ default:
+ return -1; /*call _dl_exit(1) */
+ }
+
+#if defined (__SUPPORT_LD_DEBUG__)
+ if (_dl_debug_reloc && _dl_debug_detail) {
+ _dl_dprintf(_dl_debug_file, "\tpatched_lazy: %x ==> %x @ %x\n",
+ old_val, *reloc_addr, reloc_addr);
+ }
+#endif
+
+ return 0;
+}
+
+void _dl_parse_lazy_relocation_information(struct dyn_elf *rpnt,
+ unsigned long rel_addr, unsigned long rel_size)
+{
+ (void)_dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc);
+}
+
+int _dl_parse_relocation_information(struct dyn_elf *rpnt,
+ struct r_scope_elem *scope, unsigned long rel_addr, unsigned long rel_size)
+{
+ return _dl_parse(rpnt->dyn, scope, rel_addr, rel_size, _dl_do_reloc);
+}
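
As a rough guide to the lazy-binding path above, the C sketch below (hypothetical helper names, not part of the patch) mirrors what _dl_linux_resolver() does with the reloc_entry byte offset handed to it by the resolve.S stub that follows: index into DT_JMPREL, resolve the symbol, patch the GOT slot, and return the new address for the stub to branch to.

#include <elf.h>

/* Hypothetical sketch: 'jmprel' is the DT_JMPREL table, 'reloc_entry' a
   byte offset into it, and 'lookup' stands in for _dl_find_hash(). */
static unsigned long fixup_plt_slot(Elf64_Addr load_addr, const char *jmprel,
                                    unsigned long reloc_entry,
                                    Elf64_Addr (*lookup)(Elf64_Word symndx))
{
	const Elf64_Rela *r = (const Elf64_Rela *)(jmprel + reloc_entry);
	Elf64_Addr *got_slot = (Elf64_Addr *)(load_addr + r->r_offset);
	Elf64_Addr new_addr = lookup(ELF64_R_SYM(r->r_info));

	*got_slot = new_addr;	/* later calls now go straight to the target */
	return new_addr;	/* resolve.S branches here via "br ip0" */
}
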
diff --git a/ldso/ldso/aarch64/resolve.S b/ldso/ldso/aarch64/resolve.S
new file mode 100644
index 000000000..3b907c46c
--- /dev/null
+++ b/ldso/ldso/aarch64/resolve.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 by Waldemar Brodkorb <wbx@uclibc-ng.org>
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ * ported from GNU libc
+ */
+
+/* Copyright (C) 2005-2016 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <features.h>
+
+#define PTR_REG(n) x##n
+#define PTR_LOG_SIZE 3
+#define PTR_SIZE (1<<PTR_LOG_SIZE)
+
+#define ip0 x16
+#define ip0l PTR_REG (16)
+#define ip1 x17
+#define lr x30
+
+/* RELA relocations are 3 pointers */
+#define RELA_SIZE (PTR_SIZE * 3)
+
+ .text
+ .globl _dl_linux_resolve
+ .type _dl_linux_resolve, %function
+ .align 2
+
+_dl_linux_resolve:
+	/* On AArch64 we get called with:
+ ip0 &PLTGOT[2]
+ ip1 temp(dl resolver entry point)
+ [sp, #8] lr
+ [sp, #0] &PLTGOT[n]
+ */
+
+ /* Save arguments. */
+ stp x8, x9, [sp, #-(80+8*16)]!
+ stp x6, x7, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x2, x3, [sp, #48]
+ stp x0, x1, [sp, #64]
+ stp q0, q1, [sp, #(80+0*16)]
+ stp q2, q3, [sp, #(80+2*16)]
+ stp q4, q5, [sp, #(80+4*16)]
+ stp q6, q7, [sp, #(80+6*16)]
+
+ /* Get pointer to linker struct. */
+ ldr PTR_REG (0), [ip0, #-PTR_SIZE]
+
+ /* Prepare to call _dl_linux_resolver(). */
+ ldr x1, [sp, 80+8*16] /* Recover &PLTGOT[n] */
+
+ sub x1, x1, ip0
+ add x1, x1, x1, lsl #1
+ lsl x1, x1, #3
+ sub x1, x1, #(RELA_SIZE<<3)
+ lsr x1, x1, #3
+
+ /* Call resolver routine. */
+ bl _dl_linux_resolver
+
+ /* Save the return. */
+ mov ip0, x0
+
+ /* Get arguments and return address back. */
+ ldp q0, q1, [sp, #(80+0*16)]
+ ldp q2, q3, [sp, #(80+2*16)]
+ ldp q4, q5, [sp, #(80+4*16)]
+ ldp q6, q7, [sp, #(80+6*16)]
+ ldp x0, x1, [sp, #64]
+ ldp x2, x3, [sp, #48]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #16]
+ ldp x8, x9, [sp], #(80+8*16)
+
+ ldp ip1, lr, [sp], #16
+
+ /* Jump to the newly found address. */
+ br ip0
+
+.size _dl_linux_resolve, .-_dl_linux_resolve
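
The add/lsl/sub/lsr sequence above turns the pushed &PLTGOT[n] into the byte offset of the matching RELA entry inside DT_JMPREL, which is exactly the reloc_entry argument that _dl_linux_resolver() expects. A worked C equivalent follows (a sketch, assuming as the code above does that ip0 holds &PLTGOT[2], that GOT slots are 8 bytes, and that the first PLT relocation belongs to PLTGOT[3]):

#include <elf.h>

/* Hypothetical sketch of the offset computation in _dl_linux_resolve. */
static unsigned long plt_reloc_offset(const Elf64_Addr *got_slot,  /* &PLTGOT[n] */
                                      const Elf64_Addr *pltgot2)   /* &PLTGOT[2] */
{
	unsigned long delta = (const char *)got_slot - (const char *)pltgot2;

	/* x1 = delta; x1 += x1 << 1 (now 3*delta); then <<3, -(RELA_SIZE<<3), >>3
	   collapses to 3*delta - sizeof(Elf64_Rela), i.e. (n - 3) * 24. */
	return 3 * delta - sizeof(Elf64_Rela);
}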