Diffstat (limited to 'debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch')
-rw-r--r--  debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch  92
1 file changed, 92 insertions, 0 deletions
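
The patch below switches gnumach's kernel stack allocation from kmem_alloc_aligned() to a dedicated slab cache. The invariant it preserves is natural alignment: each stack starts at an address that is a multiple of the stack size, so the start and end of a stack can be recovered from any in-stack address with a single mask. A minimal standalone sketch of that trick follows; stack_base()/stack_top() are illustrative helpers, not gnumach code, and the typedef and KERNEL_STACK_SIZE value stand in for gnumach's own definitions. It assumes KERNEL_STACK_SIZE is a power of two, which the alignment assert in the patch also relies on.

    typedef unsigned long vm_offset_t;      /* illustrative; gnumach defines this */
    #define KERNEL_STACK_SIZE (4 * 4096)    /* illustrative value; must be a power of two */

    /* With naturally aligned stacks, masking off the low bits of any
     * address inside a stack yields the base of that stack. */
    static inline vm_offset_t stack_base(vm_offset_t addr)
    {
        return addr & ~((vm_offset_t) (KERNEL_STACK_SIZE - 1));
    }

    static inline vm_offset_t stack_top(vm_offset_t addr)
    {
        return stack_base(addr) + KERNEL_STACK_SIZE;
    }
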
diff --git a/debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch b/debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
new file mode 100644
index 0000000..98a3613
--- /dev/null
+++ b/debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
@@ -0,0 +1,92 @@
+From b49fd391d6aef3df51aeaa4dc12b68c0b4b1bb1f Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Thu, 2 Jul 2015 16:20:44 +0200
+Subject: [PATCH gnumach 4/5] kern: allocate kernel stacks using the slab
+ allocator
+
+* kern/slab.c (kmem_cache_init): Relax alignment restriction.
+* kern/thread.c (stack_cache): New variable.
+(stack_alloc): Use the slab allocator.
+(stack_collect): Adjust accordingly.
+(thread_init): Initialize `stack_cache'.
+---
+ kern/slab.c | 1 -
+ kern/thread.c | 29 +++++++++++++++++++----------
+ 2 files changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/kern/slab.c b/kern/slab.c
+index 60378b5..32469df 100644
+--- a/kern/slab.c
++++ b/kern/slab.c
+@@ -800,7 +800,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
+
+ assert(obj_size > 0);
+ assert(ISP2(align));
+- assert(align < PAGE_SIZE);
+
+ buf_size = P2ROUND(obj_size, align);
+
+diff --git a/kern/thread.c b/kern/thread.c
+index 1f47553..36a5a92 100644
+--- a/kern/thread.c
++++ b/kern/thread.c
+@@ -165,6 +165,11 @@ boolean_t stack_alloc_try(
+ }
+
+ /*
++ * We allocate kernel stacks using the slab allocator.
++ */
++static struct kmem_cache stack_cache;
++
++/*
+ * stack_alloc:
+ *
+ * Allocate a kernel stack for a thread.
+@@ -195,15 +200,10 @@ kern_return_t stack_alloc(
+ (void) splx(s);
+
+ if (stack == 0) {
+- kern_return_t kr;
+- /*
+- * Kernel stacks should be naturally aligned,
+- * so that it is easy to find the starting/ending
+- * addresses of a stack given an address in the middle.
+- */
+- kr = kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE);
+- if (kr != KERN_SUCCESS)
+- return kr;
++ stack = kmem_cache_alloc(&stack_cache);
++ assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
++ if (stack == 0)
++ return KERN_RESOURCE_SHORTAGE;
+
+ #if MACH_DEBUG
+ stack_init(stack);
+@@ -265,7 +265,7 @@ void stack_collect(void)
+ #if MACH_DEBUG
+ stack_finalize(stack);
+ #endif /* MACH_DEBUG */
+- kmem_free(kmem_map, stack, KERNEL_STACK_SIZE);
++ kmem_cache_free(&stack_cache, stack);
+
+ s = splsched();
+ stack_lock();
+@@ -301,6 +301,15 @@ void thread_init(void)
+ NULL, NULL, NULL, 0);
+
+ /*
++ * Kernel stacks should be naturally aligned, so that it
++ * is easy to find the starting/ending addresses of a
++ * stack given an address in the middle.
++ */
++ kmem_cache_init(&stack_cache, "stack",
++ KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
++ NULL, NULL, NULL, 0);
++
++ /*
+ * Fill in a template thread for fast initialization.
+ * [Fields that must be (or are typically) reset at
+ * time of creation are so noted.]
+--
+2.1.4
+
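
Taken out of the diff, the allocation pattern the patch adopts reduces to the following sketch. It uses only the kmem_cache calls visible in the hunks above, with the signatures as they appear there; the stacks_init/example_* wrappers are illustrative names. Passing KERNEL_STACK_SIZE as both object size and alignment is exactly what required dropping the assert(align < PAGE_SIZE) in kmem_cache_init().

    static struct kmem_cache stack_cache;

    void stacks_init(void)
    {
        /* One cache object per stack; aligning each object to its own
         * size gives the natural alignment checked in stack_alloc(). */
        kmem_cache_init(&stack_cache, "stack",
                        KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
                        NULL, NULL, NULL, 0);
    }

    kern_return_t example_stack_alloc(vm_offset_t *out)
    {
        vm_offset_t stack = kmem_cache_alloc(&stack_cache);

        if (stack == 0)
            /* The cache could not grow; report the shortage to the
             * caller, as stack_alloc() does in the hunk above. */
            return KERN_RESOURCE_SHORTAGE;

        assert((stack & (KERNEL_STACK_SIZE - 1)) == 0);
        *out = stack;
        return KERN_SUCCESS;
    }

    void example_stack_free(vm_offset_t stack)
    {
        kmem_cache_free(&stack_cache, stack);
    }

Beyond preserving alignment, routing stack_collect() through kmem_cache_free() lets the slab layer cache freed stacks for reuse rather than returning them to the kernel map on every free, which is the usual benefit of a dedicated cache.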