author		Justus Winter <4winter@informatik.uni-hamburg.de>	2015-07-03 12:41:48 +0200
committer	Justus Winter <4winter@informatik.uni-hamburg.de>	2015-08-15 12:43:26 +0200
commit		59d3bf7af26bcb9b3cae0b2745b587259d659ec9 (patch)
tree		092752f6d47d9e24d11b9e242ad72b002d122098 /kern
parent		d08acfbf13804d0a70fc7ed5ad26a7828b15101b (diff)
kern: remove the list of free stacks
* kern/counters.c: Remove the stack allocation counters.
* kern/counters.h: Likewise.
* kern/thread.c (stack_free_{list,count,limit}): Drop variables.
(stack_next): Remove macro.
(stack_alloc_try): Allocate stack using the slab allocator.
(stack_alloc): Merely call `stack_alloc_try'.
(stack_free): Adapt accordingly.
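
Taken together, these entries retire a hand-rolled cache of whole kernel stacks that sat in front of the slab allocator: after the change, kmem_cache_alloc and kmem_cache_free are the only allocation path, so the free list, its counters, and the trimming loop in stack_collect all become dead weight. For the alignment assert in stack_alloc_try to hold, the cache must hand out KERNEL_STACK_SIZE-aligned objects. A minimal sketch of such a cache setup follows; the initialization site is not part of this diff, stack_cache_setup is a hypothetical name, and the exact kmem_cache_init argument list has varied across gnumach versions, so treat the call as an assumption:

	#include <kern/slab.h>	/* struct kmem_cache, kmem_cache_init (assumed header) */

	static struct kmem_cache stack_cache;

	/* Hypothetical one-time boot initialization: create a cache of
	 * KERNEL_STACK_SIZE-byte objects aligned to KERNEL_STACK_SIZE,
	 * so that (stack & (KERNEL_STACK_SIZE-1)) == 0 holds for every
	 * object the cache returns.  */
	static void stack_cache_setup(void)
	{
		kmem_cache_init(&stack_cache, "stack",
				KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
				NULL, 0);
	}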
Diffstat (limited to 'kern')
-rw-r--r--	kern/counters.c	  3
-rw-r--r--	kern/counters.h	  3
-rw-r--r--	kern/thread.c	147
3 files changed, 16 insertions(+), 137 deletions(-)
diff --git a/kern/counters.c b/kern/counters.c
index 0a0665b..74fd42d 100644
--- a/kern/counters.c
+++ b/kern/counters.c
@@ -46,9 +46,6 @@ mach_counter_t c_stacks_current = 0;
 mach_counter_t c_stacks_max = 0;
 mach_counter_t c_stacks_min = 0;
 mach_counter_t c_stacks_total = 0;
-mach_counter_t c_stack_alloc_hits = 0;
-mach_counter_t c_stack_alloc_misses = 0;
-mach_counter_t c_stack_alloc_max = 0;
 mach_counter_t c_clock_ticks = 0;
 mach_counter_t c_ipc_mqueue_send_block = 0;
 mach_counter_t c_ipc_mqueue_receive_block_user = 0;
diff --git a/kern/counters.h b/kern/counters.h
index aa1e739..bfa9b44 100644
--- a/kern/counters.h
+++ b/kern/counters.h
@@ -69,9 +69,6 @@ extern mach_counter_t c_stacks_current;
 extern mach_counter_t c_stacks_max;
 extern mach_counter_t c_stacks_min;
 extern mach_counter_t c_stacks_total;
-extern mach_counter_t c_stack_alloc_hits;
-extern mach_counter_t c_stack_alloc_misses;
-extern mach_counter_t c_stack_alloc_max;
 extern mach_counter_t c_clock_ticks;
 extern mach_counter_t c_ipc_mqueue_send_block;
 extern mach_counter_t c_ipc_mqueue_receive_block_user;
diff --git a/kern/thread.c b/kern/thread.c
index 886d20d..88e425e 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -101,8 +101,6 @@ vm_size_t stack_max_usage = 0;
  *	stack_free
  *	stack_handoff
  *	stack_collect
- *	and if MACH_DEBUG:
- *	stack_statistics
  */
 #else	/* MACHINE_STACK */
 /*
@@ -120,16 +118,10 @@ decl_simple_lock_data(, stack_lock_data)/* splsched only */
 #define stack_lock()	simple_lock(&stack_lock_data)
 #define stack_unlock()	simple_unlock(&stack_lock_data)
 
-vm_offset_t stack_free_list;		/* splsched only */
-unsigned int stack_free_count = 0;	/* splsched only */
-unsigned int stack_free_limit = 1;	/* patchable */
-
 /*
- * The next field is at the base of the stack,
- * so the low end is left unsullied.
+ * We allocate kernel stacks using the slab allocator.
  */
-
-#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
+static struct kmem_cache stack_cache;
 
 /*
  *	stack_alloc_try:
@@ -144,73 +136,37 @@ boolean_t stack_alloc_try(
 {
 	vm_offset_t stack;
 
-	stack_lock();
-	stack = stack_free_list;
-	if (stack != 0) {
-		stack_free_list = stack_next(stack);
-		stack_free_count--;
-	} else {
+	stack = kmem_cache_alloc(&stack_cache);
+	assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
+
+#if MACH_DEBUG
+	if (stack)
+		stack_init(stack);
+#endif	/* MACH_DEBUG */
+
+	if (! stack)
 		stack = thread->stack_privilege;
-	}
-	stack_unlock();
 
 	if (stack != 0) {
 		stack_attach(thread, stack, resume);
-		counter(c_stack_alloc_hits++);
 		return TRUE;
 	} else {
-		counter(c_stack_alloc_misses++);
 		return FALSE;
 	}
 }
 
 /*
- * We allocate kernel stacks using the slab allocator.
- */
-static struct kmem_cache stack_cache;
-
-/*
  *	stack_alloc:
  *
  *	Allocate a kernel stack for a thread.
- *	May block.
  */
 
 kern_return_t stack_alloc(
 	thread_t	thread,
 	void		(*resume)(thread_t))
 {
-	vm_offset_t stack;
-	spl_t	s;
-
-	/*
-	 *	We first try the free list.  It is probably empty,
-	 *	or stack_alloc_try would have succeeded, but possibly
-	 *	a stack was freed before the swapin thread got to us.
-	 */
-
-	s = splsched();
-	stack_lock();
-	stack = stack_free_list;
-	if (stack != 0) {
-		stack_free_list = stack_next(stack);
-		stack_free_count--;
-	}
-	stack_unlock();
-	(void) splx(s);
-
-	if (stack == 0) {
-		stack = kmem_cache_alloc(&stack_cache);
-		assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
-		if (stack == 0)
-			return KERN_RESOURCE_SHORTAGE;
-
-#if MACH_DEBUG
-		stack_init(stack);
-#endif	/* MACH_DEBUG */
-	}
+	if (! stack_alloc_try (thread, resume))
+		return KERN_RESOURCE_SHORTAGE;
 
-	stack_attach(thread, stack, resume);
 	return KERN_SUCCESS;
 }
 
@@ -228,17 +184,8 @@ void stack_free(
 
 	stack = stack_detach(thread);
 
-	if (stack != thread->stack_privilege) {
-		stack_lock();
-		stack_next(stack) = stack_free_list;
-		stack_free_list = stack;
-		stack_free_count += 1;
-#if MACH_COUNTERS
-		if (stack_free_count > c_stack_alloc_max)
-			c_stack_alloc_max = stack_free_count;
-#endif	/* MACH_COUNTERS */
-		stack_unlock();
-	}
+	if (stack != thread->stack_privilege)
+		kmem_cache_free (&stack_cache, stack);
 }
 
 /*
@@ -250,28 +197,6 @@ void stack_free(
 
 void stack_collect(void)
 {
-	vm_offset_t stack;
-	spl_t	s;
-
-	s = splsched();
-	stack_lock();
-	while (stack_free_count > stack_free_limit) {
-		stack = stack_free_list;
-		stack_free_list = stack_next(stack);
-		stack_free_count--;
-		stack_unlock();
-		(void) splx(s);
-
-#if MACH_DEBUG
-		stack_finalize(stack);
-#endif	/* MACH_DEBUG */
-		kmem_cache_free(&stack_cache, stack);
-
-		s = splsched();
-		stack_lock();
-	}
-	stack_unlock();
-	(void) splx(s);
 }
 
 #endif	/* MACHINE_STACK */
@@ -2383,46 +2308,6 @@ void stack_finalize(
 	}
 }
 
-#ifndef MACHINE_STACK
-/*
- *	stack_statistics:
- *
- *	Return statistics on cached kernel stacks.
- *	*maxusagep must be initialized by the caller.
- */
-
-void stack_statistics(
-	natural_t *totalp,
-	vm_size_t *maxusagep)
-{
-	spl_t	s;
-
-	s = splsched();
-	stack_lock();
-	if (stack_check_usage) {
-		vm_offset_t stack;
-
-		/*
-		 *	This is pretty expensive to do at splsched,
-		 *	but it only happens when someone makes
-		 *	a debugging call, so it should be OK.
-		 */
-
-		for (stack = stack_free_list; stack != 0;
-		     stack = stack_next(stack)) {
-			vm_size_t usage = stack_usage(stack);
-
-			if (usage > *maxusagep)
-				*maxusagep = usage;
-		}
-	}
-
-	*totalp = stack_free_count;
-	stack_unlock();
-	(void) splx(s);
-}
-#endif	/* MACHINE_STACK */
-
 kern_return_t host_stack_usage(
 	host_t		host,
 	vm_size_t	*reservedp,
@@ -2438,12 +2323,12 @@ kern_return_t host_stack_usage(
 	if (host == HOST_NULL)
 		return KERN_INVALID_HOST;
 
+	total = 0;	/* XXX */
+
 	simple_lock(&stack_usage_lock);
 	maxusage = stack_max_usage;
 	simple_unlock(&stack_usage_lock);
 
-	stack_statistics(&total, &maxusage);
-
 	*reservedp = 0;
 	*totalp = total;
 	*spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
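
Two consequences of the patch are easy to miss in the diff above. First, stack_collect is kept as an empty function rather than deleted, so existing callers keep working while the slab layer's own reclamation takes over the job of trimming cached stacks. Second, the "May block." line disappears from stack_alloc's comment, and the body shrinks to forwarding stack_alloc_try's verdict, failing with KERN_RESOURCE_SHORTAGE rather than waiting on a free list. A hypothetical caller (example_resume and example_attach_stack are illustrative names, not part of the patch) would treat it as a fallible call:

	/* Hypothetical resume callback: runs when the thread is next
	 * dispatched on its freshly attached kernel stack.  */
	static void example_resume(thread_t thread)
	{
		/* continue `thread' where it left off */
	}

	/* Hypothetical caller showing the post-patch contract of
	 * stack_alloc: KERN_SUCCESS with a stack attached, or
	 * KERN_RESOURCE_SHORTAGE when the slab allocator is exhausted
	 * and the thread has no privileged stack to fall back on.  */
	static kern_return_t example_attach_stack(thread_t thread)
	{
		kern_return_t kr = stack_alloc(thread, example_resume);

		if (kr != KERN_SUCCESS)
			return kr;	/* out of memory: retry or fail over */

		return KERN_SUCCESS;
	}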