Hello,
Attached is a patch addressing CVE-2022-29503. After extensive testing, I
wasn't able to trigger the bug as described in the reference articles. In
my testing, the only way to trigger it was to raise PTHREAD_THREADS_MAX
to allow enough threads to exhaust the address space. It is, however,
possible that some custom configurations have a small enough address
space for this to become an issue.
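To illustrate, here is a rough sketch of that kind of exhaustion test. It
assumes a linuxthreads build with PTHREAD_THREADS_MAX raised, and park() is
only an illustrative placeholder, not part of the patch:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *park(void *arg)
{
    (void)arg;
    for (;;)
        pause();  /* keep the thread alive so its stack stays mapped */
    return NULL;
}

int main(void)
{
    pthread_t tid;
    unsigned long n = 0;

    /* Each new thread gets a stack mmap'd at a fixed slot below the
     * previous one; with PTHREAD_THREADS_MAX raised, this eventually
     * walks into address space that is already in use. */
    while (pthread_create(&tid, NULL, park, NULL) == 0)
        n++;

    printf("created %lu threads before failure\n", n);
    return 0;
}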
Since this is a problem of mmap'ing on top of existing mapped pages, I
have elected to take advantage of the new MAP_FIXED_NOREPLACE flag and
the backporting guidance that accompanies it in mmap(2). To support older
kernels, I define the flag to 0 at the top of manager.c if it isn't
already defined, and check the address mmap returns.
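For reference, that backport pattern looks roughly like this in isolation
(the target address below is only an illustrative value, not one the patch
uses):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* On kernels/headers older than 4.17 the flag doesn't exist; defining it
 * to 0 makes mmap ignore it, so the caller must verify the result. */
#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0
#endif

int main(void)
{
    void *hint = (void *)0x70000000;  /* hypothetical target address */
    void *res = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                     -1, 0);

    if (res == MAP_FAILED) {
        /* On 4.17+ kernels a collision with an existing mapping
         * fails here with EEXIST. */
        fprintf(stderr, "mmap: %s\n", strerror(errno));
        return 1;
    }
    if (res != hint) {
        /* An older kernel ignored the flag and picked another address:
         * back the mapping out and treat it as a failure. */
        munmap(res, 4096);
        fprintf(stderr, "address %p already in use\n", hint);
        return 1;
    }
    printf("mapped at %p\n", res);
    return 0;
}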
From 2b11aa00bd3b637c5c96db39538952dd12514085 Mon Sep 17 00:00:00 2001
From: linted <linted(a)users.noreply.github.com>
Date: Sat, 21 Jan 2023 15:22:48 -0500
Subject: [PATCH 1/2] Fix for CVE-2022-29503. Changed linuxthreads' stack
allocation mmap to use the new MAP_FIXED_NOREPLACE flag on kernels 4.17 and
newer. For older kernels, a check is added to verify that the requested
address matches the address received. If the addresses don't match, an
error is returned and thread creation is aborted.
Signed-off-by: linted <linted(a)users.noreply.github.com>
---
libpthread/linuxthreads/manager.c | 35 ++++++++++++++++++++++++++-----
1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/libpthread/linuxthreads/manager.c b/libpthread/linuxthreads/manager.c
index 2a1ee62af..122997b10 100644
--- a/libpthread/linuxthreads/manager.c
+++ b/libpthread/linuxthreads/manager.c
@@ -47,6 +47,15 @@
# define USE_SELECT
#endif
+/* MAP_FIXED_NOREPLACE is not supported on kernels < 4.17.
+ * If it's not already defined, define it to 0.
+ * We check the address returned by mmap to ensure we got
+ * the requested mapping, and error out otherwise.
+ */
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE 0
+#endif
+
/* Array of active threads. Entry 0 is reserved for the initial thread. */
struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
{ { __LOCK_INITIALIZER, &__pthread_initial_thread, 0},
@@ -371,12 +380,19 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
/* Allocate space for stack and thread descriptor at default address */
new_thread = default_new_thread;
new_thread_bottom = (char *) (new_thread + 1) - stacksize;
- if (mmap((caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE),
+ void *new_stack_addr = NULL;
+ new_stack_addr = mmap((caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE),
INITIAL_STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_GROWSDOWN,
- -1, 0) == MAP_FAILED)
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE | MAP_GROWSDOWN,
+ -1, 0);
+ if (new_stack_addr == MAP_FAILED) {
/* Bad luck, this segment is already mapped. */
return -1;
+ } else if (new_stack_addr != (caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE)) {
+ /* Worse luck, we almost overwrote an existing page. */
+ munmap(new_stack_addr, INITIAL_STACK_SIZE);
+ return -2;
+ }
/* We manage to get a stack. Now see whether we need a guard
and allocate it if necessary. Notice that the default
attributes (stack_size = STACK_SIZE - pagesize) do not need
@@ -496,9 +512,10 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
return EAGAIN;
if (__pthread_handles[sseg].h_descr != NULL)
continue;
- if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
+ int res = pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
&new_thread, &new_thread_bottom,
- &guardaddr, &guardsize) == 0)
+ &guardaddr, &guardsize);
+ if (res == 0)
break;
#ifndef __ARCH_USE_MMU__
else
@@ -507,6 +524,14 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
* use the next one. However, when there is no MMU, malloc () is used.
* It's waste of CPU cycles to continue to try if it fails. */
return EAGAIN;
+#else
+ else if (res == -2)
+ /* When there is an MMU, if pthread_allocate_stack failed with -2,
+ * it indicates that we are attempting to mmap in address space which
+ * is already allocated. Any additional attempts will result in failure
+ * since we have exhausted our stack area.
+ */
+ return EAGAIN;
#endif
}
__pthread_handles_num++;
--
2.34.1