author    linted <linted@users.noreply.github.com>  2023-01-21 15:22:48 -0500
committer Waldemar Brodkorb <wbx@openadk.org>  2023-01-22 09:31:12 +0100
commit    60f1d7cff4c17d3be239bb86237010404d75f7b7 (patch)
tree      d6d6e6e9d9f9579360e22edd23e27b2f07ac2bf6
parent    40f4a56f4311c0e39e145160a38c4eff7942b697 (diff)
Fix for CVE-2022-29503.
Changed linuxthreads' stack allocation mmap to use the new MAP_FIXED_NOREPLACE flag on kernels 4.17 and newer. For older kernels, a check is added to verify that the address returned by mmap matches the requested address; if the addresses do not match, an error is returned and thread creation is aborted.

Signed-off-by: linted <linted@users.noreply.github.com>
-rw-r--r--  libpthread/linuxthreads/manager.c | 35
1 file changed, 30 insertions(+), 5 deletions(-)
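For readers unfamiliar with the flag, the allocation pattern this patch relies on can be sketched as a small standalone helper. This is an illustration only, not code from manager.c: the helper name map_stack_at() and its parameters are hypothetical. On kernels that understand MAP_FIXED_NOREPLACE, mmap fails when the requested range is already mapped; on older kernels the flag is defined to 0, mmap treats the address as a hint, and the returned address has to be compared against that hint.

#include <stddef.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0   /* older kernels: plain hint, no hard failure */
#endif

/* Hypothetical helper: map a thread stack at exactly 'hint' or fail. */
static int map_stack_at(void *hint, size_t len)
{
    void *addr = mmap(hint, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                      -1, 0);
    if (addr == MAP_FAILED)
        return -1;          /* new kernels: requested range already occupied */
    if (addr != hint) {
        /* Old-kernel fallback: the kernel relocated the mapping, which
         * means the requested range was not free.  Undo and report it. */
        munmap(addr, len);
        return -2;
    }
    return 0;
}

The hunks below apply the same check to the linuxthreads stack mapping and give the two failure modes distinct return codes (-1 and -2) so the caller can tell them apart.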
diff --git a/libpthread/linuxthreads/manager.c b/libpthread/linuxthreads/manager.c
index 2a1ee62af..122997b10 100644
--- a/libpthread/linuxthreads/manager.c
+++ b/libpthread/linuxthreads/manager.c
@@ -47,6 +47,15 @@
# define USE_SELECT
#endif
+/* MAP_FIXED_NOREPLACE is only supported on kernels >= 4.17.
+ * If it's not already defined, define it to 0 so it has no effect.
+ * In that case we check the address returned by mmap against the
+ * requested address, and error out on a mismatch.
+ */
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE 0
+#endif
+
/* Array of active threads. Entry 0 is reserved for the initial thread. */
struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
{ { __LOCK_INITIALIZER, &__pthread_initial_thread, 0},
@@ -371,12 +380,19 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
/* Allocate space for stack and thread descriptor at default address */
new_thread = default_new_thread;
new_thread_bottom = (char *) (new_thread + 1) - stacksize;
- if (mmap((caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE),
+ void * new_stack_addr = NULL;
+ new_stack_addr = mmap((caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE),
INITIAL_STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_GROWSDOWN,
- -1, 0) == MAP_FAILED)
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE | MAP_GROWSDOWN,
+ -1, 0);
+ if (new_stack_addr == MAP_FAILED) {
/* Bad luck, this segment is already mapped. */
return -1;
+ } else if (new_stack_addr != (caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE)) {
+ /* Worse luck, we almost overwrote an existing page */
+ munmap(new_stack_addr, INITIAL_STACK_SIZE);
+ return -2;
+ }
/* We manage to get a stack. Now see whether we need a guard
and allocate it if necessary. Notice that the default
attributes (stack_size = STACK_SIZE - pagesize) do not need
@@ -496,9 +512,10 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
return EAGAIN;
if (__pthread_handles[sseg].h_descr != NULL)
continue;
- if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
+ int res = pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
&new_thread, &new_thread_bottom,
- &guardaddr, &guardsize) == 0)
+ &guardaddr, &guardsize);
+ if (res == 0)
break;
#ifndef __ARCH_USE_MMU__
else
@@ -507,6 +524,14 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
* use the next one. However, when there is no MMU, malloc () is used.
* It's waste of CPU cycles to continue to try if it fails. */
return EAGAIN;
+#else
+ else if (res == -2)
+ /* When there is an MMU, if pthread_allocate_stack failed with -2,
+ * it indicates that we are attempting to mmap in address space which
+ * is already allocated. Any additional attempts will result in failure
+ * since we have exhausted our stack area.
+ */
+ return EAGAIN;
#endif
}
__pthread_handles_num++;
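Read together, the last two hunks amount to the simplified retry loop below (shown for the MMU case; without an MMU the patch bails out on any failure). The names pick_stack_segment() and try_segment() are hypothetical stand-ins for pthread_handle_create()'s segment scan and pthread_allocate_stack(); the sketch only illustrates why -2 is treated as fatal while -1 moves on to the next segment.

#include <errno.h>

/* Return-code convention mirrors the patch:
 *   0  -> a stack was mapped for this segment
 *  -1  -> this segment's slot is taken, try the next one
 *  -2  -> the old-kernel fallback found the address range already mapped */
static int pick_stack_segment(int max_segments,
                              int (*try_segment)(int seg), int *chosen)
{
    for (int seg = 1; seg < max_segments; seg++) {
        int res = try_segment(seg);
        if (res == 0) {
            *chosen = seg;   /* success: use this thread slot */
            return 0;
        }
        if (res == -2)
            return EAGAIN;   /* further attempts would clash as well */
        /* res == -1: segment already in use, keep scanning */
    }
    return EAGAIN;           /* all thread slots exhausted */
}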