author     Eyal Itkin <eyalit@checkpoint.com>        2019-12-27 18:45:20 +0200
committer  Waldemar Brodkorb <wbx@openadk.org>       2020-02-16 12:32:21 +0100
commit     886878b22424d6f95bcdeee55ada72049d21547c
tree       c1ca8f9ac64bb6750e8b949635ec87963b454848 /libc/stdlib
parent     90f24fc94897d93deb80d933d18a4f31dc6bf05a
Add Safe-Linking to fastbins
Safe-Linking is a security mechanism that protects single-linked
lists (such as the fastbins) from being tampered with by attackers. The
mechanism makes use of randomness from ASLR (mmap_base), and when
combined with chunk alignment integrity checks, it protects the
pointers from being hijacked by an attacker.
While Safe-Unlinking protects double-linked lists (such as the small
bins), there wasn't any similar protection for attacks against
single-linked lists. This solution protects against 3 common attacks:
* Partial pointer override: modifies the lower bytes (Little Endian)
* Full pointer override: hijacks the pointer to an attacker's location
* Unaligned chunks: pointing the list to an unaligned address
The design assumes an attacker doesn't know where the heap is located,
and uses the ASLR randomness to "sign" the single-linked pointers. We
mark the pointer as P and the location in which it is stored as L, and
the calculation will be:
* PROTECT(P) := (L >> PAGE_SHIFT) XOR (P)
* *L = PROTECT(P)
This way, the random bits of the address L (which start at bit
position PAGE_SHIFT) are merged into the low bits of the stored
protected pointer. This protection layer prevents an attacker from
modifying the pointer into a controlled value.
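As a rough, standalone illustration of this masking (not the uClibc
code; the helper name, the example addresses, and the PAGE_SHIFT value
of 12 are assumptions made for the sketch):

    #include <assert.h>
    #include <stdint.h>

    #define EXAMPLE_PAGE_SHIFT 12                /* assumed 4 KiB pages */

    /* PROTECT(P) := (L >> PAGE_SHIFT) XOR (P); XOR makes the operation its
       own inverse, so the same function also serves as REVEAL. */
    static uintptr_t protect_ptr(uintptr_t L, uintptr_t P)
    {
        return (L >> EXAMPLE_PAGE_SHIFT) ^ P;
    }

    int main(void)
    {
        uintptr_t L = 0x565d2010u;               /* where the pointer is stored (ASLR-randomized) */
        uintptr_t P = 0x565d3020u;               /* the next-chunk pointer being stored */
        uintptr_t stored = protect_ptr(L, P);    /* *L = PROTECT(P) */
        assert(protect_ptr(L, stored) == P);     /* revealing restores the original pointer */
        return 0;
    }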
An additional check that the chunks are MALLOC_ALIGNed adds an
important layer:
* Attackers can't point to illegal (unaligned) memory addresses
* Attackers must correctly guess the alignment bits
On standard 32-bit Linux machines an attacker will fail outright 7 out
of 8 times, and on 64-bit machines 15 out of 16 times.
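These odds follow from the alignment mask: with the usual 8-byte
MALLOC_ALIGNMENT on 32-bit targets a legitimate chunk address has 3 zero
low bits, and with 16-byte alignment on 64-bit targets it has 4, so a
hijacked pointer whose low bits are effectively random survives the
check only 1 time in 8, or 1 in 16. A minimal sketch of such a mask test
(the macro name and mask values are illustrative, not the patch's
aligned_OK()):

    #include <stdint.h>

    /* A forged pointer passes only if all alignment bits happen to be zero:
       1 in 8 for mask 0x7 (8-byte alignment), 1 in 16 for mask 0xf (16-byte). */
    #define EXAMPLE_ALIGNED_OK(p, mask)  ((((uintptr_t)(p)) & (mask)) == 0)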
The proposed solution adds 3-4 asm instructions per malloc()/free(),
and therefore has at most a minor performance impact. A similar
protection was added to Chromium's version of TCMalloc
in 2013, and according to their documentation the performance overhead
was less than 2%.
Signed-off-by: Eyal Itkin <eyalit@checkpoint.com>
Diffstat (limited to 'libc/stdlib')
-rw-r--r--  libc/stdlib/malloc-standard/free.c     |  5
-rw-r--r--  libc/stdlib/malloc-standard/mallinfo.c |  3
-rw-r--r--  libc/stdlib/malloc-standard/malloc.c   |  6
-rw-r--r--  libc/stdlib/malloc-standard/malloc.h   | 12
4 files changed, 21 insertions, 5 deletions
diff --git a/libc/stdlib/malloc-standard/free.c b/libc/stdlib/malloc-standard/free.c
index a2d765d41..f3602cf48 100644
--- a/libc/stdlib/malloc-standard/free.c
+++ b/libc/stdlib/malloc-standard/free.c
@@ -214,8 +214,9 @@ void attribute_hidden __malloc_consolidate(mstate av)
         *fb = 0;

         do {
+            CHECK_PTR(p);
             check_inuse_chunk(p);
-            nextp = p->fd;
+            nextp = REVEAL_PTR(&p->fd, p->fd);

             /* Slightly streamlined version of consolidation code in free() */
             size = p->size & ~PREV_INUSE;
@@ -308,7 +309,7 @@ void free(void* mem)

     set_fastchunks(av);
     fb = &(av->fastbins[fastbin_index(size)]);
-    p->fd = *fb;
+    p->fd = PROTECT_PTR(&p->fd, *fb);
     *fb = p;
     }
diff --git a/libc/stdlib/malloc-standard/mallinfo.c b/libc/stdlib/malloc-standard/mallinfo.c
index dbe4d49b8..992322341 100644
--- a/libc/stdlib/malloc-standard/mallinfo.c
+++ b/libc/stdlib/malloc-standard/mallinfo.c
@@ -49,7 +49,8 @@ struct mallinfo mallinfo(void)
     fastavail = 0;

     for (i = 0; i < NFASTBINS; ++i) {
-        for (p = av->fastbins[i]; p != 0; p = p->fd) {
+        for (p = av->fastbins[i]; p != 0; p = REVEAL_PTR(&p->fd, p->fd)) {
+            CHECK_PTR(p);
             ++nfastblocks;
             fastavail += chunksize(p);
         }
diff --git a/libc/stdlib/malloc-standard/malloc.c b/libc/stdlib/malloc-standard/malloc.c
index 1a6d4dc1c..1f898eb29 100644
--- a/libc/stdlib/malloc-standard/malloc.c
+++ b/libc/stdlib/malloc-standard/malloc.c
@@ -260,12 +260,13 @@ void __do_check_malloc_state(void)
             assert(p == 0);

         while (p != 0) {
+            CHECK_PTR(p);
             /* each chunk claims to be inuse */
             __do_check_inuse_chunk(p);
             total += chunksize(p);
             /* chunk belongs in this bin */
             assert(fastbin_index(chunksize(p)) == i);
-            p = p->fd;
+            p = REVEAL_PTR(&p->fd, p->fd);
         }
     }
@@ -855,7 +856,8 @@ void* malloc(size_t bytes)
     if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
         fb = &(av->fastbins[(fastbin_index(nb))]);
         if ( (victim = *fb) != 0) {
-            *fb = victim->fd;
+            CHECK_PTR(victim);
+            *fb = REVEAL_PTR(&victim->fd, victim->fd);
             check_remalloced_chunk(victim, nb);
             retval = chunk2mem(victim);
             goto DONE;
diff --git a/libc/stdlib/malloc-standard/malloc.h b/libc/stdlib/malloc-standard/malloc.h
index 44120d388..30a696e5a 100644
--- a/libc/stdlib/malloc-standard/malloc.h
+++ b/libc/stdlib/malloc-standard/malloc.h
@@ -839,6 +839,18 @@ typedef struct malloc_chunk* mfastbinptr;
 #define get_max_fast(M) \
     ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))

+/*
+   Safe-Linking:
+   Use randomness from ASLR (mmap_base) to protect single-linked lists
+   of fastbins. Together with allocation alignment checks, this mechanism
+   reduces the risk of pointer hijacking, as was done with Safe-Unlinking
+   in the double-linked lists of smallbins.
+*/
+#define PROTECT_PTR(pos, ptr) ((mchunkptr)((((size_t)pos) >> PAGE_SHIFT) ^ ((size_t)ptr)))
+#define REVEAL_PTR(pos, ptr)  PROTECT_PTR(pos, ptr)
+#define CHECK_PTR(P)     \
+  if (!aligned_OK(P))    \
+    abort();

 /* morecore_properties is a status word holding dynamically discovered
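To see how the new macros behave end to end, here is a standalone
sketch of the patched fastbin push (free() path) and pop (malloc()
path); the simplified chunk type, the fixed PAGE_SHIFT of 12, and the
16-byte alignment mask are assumptions of the example, not values taken
from malloc.h:

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define PAGE_SHIFT        12   /* assumed: 4 KiB pages */
    #define MALLOC_ALIGN_MASK 15   /* assumed: 16-byte alignment */

    typedef struct chunk { struct chunk *fd; } chunk;   /* simplified chunk */

    #define PROTECT_PTR(pos, ptr) \
        ((chunk *)((((size_t)(pos)) >> PAGE_SHIFT) ^ ((size_t)(ptr))))
    #define REVEAL_PTR(pos, ptr)  PROTECT_PTR(pos, ptr)
    #define CHECK_PTR(p) \
        do { if (((size_t)(p)) & MALLOC_ALIGN_MASK) abort(); } while (0)

    int main(void)
    {
        chunk *bin = NULL;                   /* empty fastbin head */
        chunk *c = aligned_alloc(16, 32);    /* stands in for a freed chunk */

        /* free() path: store a protected next pointer, then push. */
        c->fd = PROTECT_PTR(&c->fd, bin);
        bin = c;

        /* malloc() path: validate alignment, then reveal the next pointer. */
        chunk *victim = bin;
        CHECK_PTR(victim);
        bin = REVEAL_PTR(&victim->fd, victim->fd);

        assert(bin == NULL);                 /* end of list recovered intact */
        free(c);
        return 0;
    }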