Diffstat (limited to 'debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch')
-rw-r--r--  debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch | 270
1 file changed, 270 insertions, 0 deletions
diff --git a/debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch b/debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch
new file mode 100644
index 0000000..b1727c4
--- /dev/null
+++ b/debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch
@@ -0,0 +1,270 @@
+From 13a4d09db07487d56ddc0667779cb8e0fe85a912 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Fri, 3 Jul 2015 12:41:48 +0200
+Subject: [PATCH gnumach 5/5] kern: remove the list of free stacks
+
+* kern/counters.c: Remove relevant counters.
+* kern/counters.h: Likewise.
+* kern/thread.c (stack_free_{list,count,limit}): Drop variables.
+(stack_next): Remove macro.
+(stack_alloc_try): Allocate stack using the slab allocator.
+(stack_alloc): Merely call `stack_alloc_try'.
+(stack_free): Adapt accordingly.
+---
+ kern/counters.c | 3 --
+ kern/counters.h | 3 --
+ kern/thread.c | 145 ++++++--------------------------------------------------
+ 3 files changed, 14 insertions(+), 137 deletions(-)
+
+diff --git a/kern/counters.c b/kern/counters.c
+index 0a0665b..74fd42d 100644
+--- a/kern/counters.c
++++ b/kern/counters.c
+@@ -46,9 +46,6 @@ mach_counter_t c_stacks_current = 0;
+ mach_counter_t c_stacks_max = 0;
+ mach_counter_t c_stacks_min = 0;
+ mach_counter_t c_stacks_total = 0;
+-mach_counter_t c_stack_alloc_hits = 0;
+-mach_counter_t c_stack_alloc_misses = 0;
+-mach_counter_t c_stack_alloc_max = 0;
+ mach_counter_t c_clock_ticks = 0;
+ mach_counter_t c_ipc_mqueue_send_block = 0;
+ mach_counter_t c_ipc_mqueue_receive_block_user = 0;
+diff --git a/kern/counters.h b/kern/counters.h
+index aa1e739..bfa9b44 100644
+--- a/kern/counters.h
++++ b/kern/counters.h
+@@ -69,9 +69,6 @@ extern mach_counter_t c_stacks_current;
+ extern mach_counter_t c_stacks_max;
+ extern mach_counter_t c_stacks_min;
+ extern mach_counter_t c_stacks_total;
+-extern mach_counter_t c_stack_alloc_hits;
+-extern mach_counter_t c_stack_alloc_misses;
+-extern mach_counter_t c_stack_alloc_max;
+ extern mach_counter_t c_clock_ticks;
+ extern mach_counter_t c_ipc_mqueue_send_block;
+ extern mach_counter_t c_ipc_mqueue_receive_block_user;
+diff --git a/kern/thread.c b/kern/thread.c
+index 36a5a92..f6e3021 100644
+--- a/kern/thread.c
++++ b/kern/thread.c
+@@ -101,8 +101,6 @@ vm_size_t stack_max_usage = 0;
+ * stack_free
+ * stack_handoff
+ * stack_collect
+- * and if MACH_DEBUG:
+- * stack_statistics
+ */
+ #else /* MACHINE_STACK */
+ /*
+@@ -120,16 +118,10 @@ decl_simple_lock_data(, stack_lock_data)/* splsched only */
+ #define stack_lock() simple_lock(&stack_lock_data)
+ #define stack_unlock() simple_unlock(&stack_lock_data)
+
+-vm_offset_t stack_free_list; /* splsched only */
+-unsigned int stack_free_count = 0; /* splsched only */
+-unsigned int stack_free_limit = 1; /* patchable */
+-
+ /*
+- * The next field is at the base of the stack,
+- * so the low end is left unsullied.
++ * We allocate kernel stacks using the slab allocator.
+ */
+-
+-#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
++static struct kmem_cache stack_cache;
+
+ /*
+ * stack_alloc_try:
+@@ -144,73 +136,37 @@ boolean_t stack_alloc_try(
+ {
+ vm_offset_t stack;
+
+- stack_lock();
+- stack = stack_free_list;
+- if (stack != 0) {
+- stack_free_list = stack_next(stack);
+- stack_free_count--;
+- } else {
++ stack = kmem_cache_alloc(&stack_cache);
++ assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
++
++#if MACH_DEBUG
++ if (stack)
++ stack_init(stack);
++#endif /* MACH_DEBUG */
++ if (! stack)
+ stack = thread->stack_privilege;
+- }
+- stack_unlock();
+
+ if (stack != 0) {
+ stack_attach(thread, stack, resume);
+- counter(c_stack_alloc_hits++);
+ return TRUE;
+ } else {
+- counter(c_stack_alloc_misses++);
+ return FALSE;
+ }
+ }
+
+ /*
+- * We allocate kernel stacks using the slab allocator.
+- */
+-static struct kmem_cache stack_cache;
+-
+-/*
+ * stack_alloc:
+ *
+ * Allocate a kernel stack for a thread.
+- * May block.
+ */
+
+ kern_return_t stack_alloc(
+ thread_t thread,
+ void (*resume)(thread_t))
+ {
+- vm_offset_t stack;
+- spl_t s;
+-
+- /*
+- * We first try the free list. It is probably empty,
+- * or stack_alloc_try would have succeeded, but possibly
+- * a stack was freed before the swapin thread got to us.
+- */
+-
+- s = splsched();
+- stack_lock();
+- stack = stack_free_list;
+- if (stack != 0) {
+- stack_free_list = stack_next(stack);
+- stack_free_count--;
+- }
+- stack_unlock();
+- (void) splx(s);
+-
+- if (stack == 0) {
+- stack = kmem_cache_alloc(&stack_cache);
+- assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
+- if (stack == 0)
+- return KERN_RESOURCE_SHORTAGE;
+-
+-#if MACH_DEBUG
+- stack_init(stack);
+-#endif /* MACH_DEBUG */
+- }
++ if (! stack_alloc_try (thread, resume))
++ return KERN_RESOURCE_SHORTAGE;
+
+- stack_attach(thread, stack, resume);
+ return KERN_SUCCESS;
+ }
+
+@@ -228,17 +184,8 @@ void stack_free(
+
+ stack = stack_detach(thread);
+
+- if (stack != thread->stack_privilege) {
+- stack_lock();
+- stack_next(stack) = stack_free_list;
+- stack_free_list = stack;
+- stack_free_count += 1;
+-#if MACH_COUNTERS
+- if (stack_free_count > c_stack_alloc_max)
+- c_stack_alloc_max = stack_free_count;
+-#endif /* MACH_COUNTERS */
+- stack_unlock();
+- }
++ if (stack != thread->stack_privilege)
++ kmem_cache_free (&stack_cache, stack);
+ }
+
+ /*
+@@ -250,28 +197,6 @@ void stack_free(
+
+ void stack_collect(void)
+ {
+- vm_offset_t stack;
+- spl_t s;
+-
+- s = splsched();
+- stack_lock();
+- while (stack_free_count > stack_free_limit) {
+- stack = stack_free_list;
+- stack_free_list = stack_next(stack);
+- stack_free_count--;
+- stack_unlock();
+- (void) splx(s);
+-
+-#if MACH_DEBUG
+- stack_finalize(stack);
+-#endif /* MACH_DEBUG */
+- kmem_cache_free(&stack_cache, stack);
+-
+- s = splsched();
+- stack_lock();
+- }
+- stack_unlock();
+- (void) splx(s);
+ }
+ #endif /* MACHINE_STACK */
+
+@@ -2382,46 +2307,6 @@ void stack_finalize(
+ }
+ }
+
+-#ifndef MACHINE_STACK
+-/*
+- * stack_statistics:
+- *
+- * Return statistics on cached kernel stacks.
+- * *maxusagep must be initialized by the caller.
+- */
+-
+-void stack_statistics(
+- natural_t *totalp,
+- vm_size_t *maxusagep)
+-{
+- spl_t s;
+-
+- s = splsched();
+- stack_lock();
+- if (stack_check_usage) {
+- vm_offset_t stack;
+-
+- /*
+- * This is pretty expensive to do at splsched,
+- * but it only happens when someone makes
+- * a debugging call, so it should be OK.
+- */
+-
+- for (stack = stack_free_list; stack != 0;
+- stack = stack_next(stack)) {
+- vm_size_t usage = stack_usage(stack);
+-
+- if (usage > *maxusagep)
+- *maxusagep = usage;
+- }
+- }
+-
+- *totalp = stack_free_count;
+- stack_unlock();
+- (void) splx(s);
+-}
+-#endif /* MACHINE_STACK */
+-
+ kern_return_t host_stack_usage(
+ host_t host,
+ vm_size_t *reservedp,
+@@ -2441,8 +2326,6 @@ kern_return_t host_stack_usage(
+ maxusage = stack_max_usage;
+ simple_unlock(&stack_usage_lock);
+
+- stack_statistics(&total, &maxusage);
+-
+ *reservedp = 0;
+ *totalp = total;
+ *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
+--
+2.1.4
+
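
Note (not part of the patch above): the change collapses stack_alloc_try()/stack_alloc() onto a single slab allocation and lets stack_free() hand the stack straight back to the cache, dropping the hand-maintained free list, its lock traffic, and the related counters. The following is a minimal standalone C sketch of that control flow; it uses aligned_alloc() as a stand-in for gnumach's kmem_cache_alloc()/kmem_cache_free(), and KSTACK_SIZE is a hypothetical stand-in for KERNEL_STACK_SIZE.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for KERNEL_STACK_SIZE; must be a power of two. */
#define KSTACK_SIZE (4 * 4096)

/* Stand-in for kmem_cache_alloc(&stack_cache): one aligned, fixed-size block. */
static uintptr_t stack_cache_alloc(void)
{
	return (uintptr_t) aligned_alloc(KSTACK_SIZE, KSTACK_SIZE);
}

/* Stand-in for kmem_cache_free(&stack_cache, stack). */
static void stack_cache_free(uintptr_t stack)
{
	free((void *) stack);
}

/* Mirrors the patched stack_alloc_try(): a single cache allocation,
   no free list and no stack_lock()/stack_unlock() pair. */
static int stack_alloc_try(uintptr_t *out)
{
	uintptr_t stack = stack_cache_alloc();

	/* Same alignment invariant the patch asserts on. */
	assert((stack & (KSTACK_SIZE - 1)) == 0);
	if (stack == 0)
		return 0;	/* caller reports KERN_RESOURCE_SHORTAGE */
	*out = stack;
	return 1;
}

int main(void)
{
	uintptr_t stack;

	if (!stack_alloc_try(&stack))
		return 1;
	printf("stack at %#lx, %u-byte aligned\n",
	       (unsigned long) stack, (unsigned) KSTACK_SIZE);
	/* Mirrors the patched stack_free(): return it straight to the cache. */
	stack_cache_free(stack);
	return 0;
}

The real kernel code additionally falls back to thread->stack_privilege when the cache is exhausted and calls stack_attach()/stack_detach(); those kernel-side details are omitted from the sketch.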