summary refs log tree commit diff
path: root/kern/thread.c
diff options
context:
space:
mode:
authorJustus Winter <4winter@informatik.uni-hamburg.de>2015-07-03 12:41:48 +0200
committerJustus Winter <4winter@informatik.uni-hamburg.de>2015-08-28 15:48:04 +0200
commit2bd375fbd8ba0eb3c09113724afd3e78283e1f4e (patch)
tree4c08beb5d0d7eb15580719aa8e91418501bc8820 /kern/thread.c
parent66fa48d924d6898c97eb29a1165bbbc0f08c6ab4 (diff)
kern: remove the list of free stacks
* kern/counters.c: Remove relevant counters. * kern/counters.h: Likewise. * kern/thread.c (stack_free_{list,count,limit}): Drop variables. (stack_next): Remove macro. (stack_alloc_try): Allocate stack using the slab allocator. (stack_alloc): Merely call `stack_alloc_try'. (stack_free): Adopt accordingly.
Diffstat (limited to 'kern/thread.c')
-rw-r--r-- kern/thread.c 147
1 file changed, 16 insertions(+), 131 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 3586e34..8676132 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -101,8 +101,6 @@ vm_size_t stack_max_usage = 0;
* stack_free
* stack_handoff
* stack_collect
- * and if MACH_DEBUG:
- * stack_statistics
*/
#else /* MACHINE_STACK */
/*
@@ -120,16 +118,10 @@ decl_simple_lock_data(, stack_lock_data)/* splsched only */
#define stack_lock() simple_lock(&stack_lock_data)
#define stack_unlock() simple_unlock(&stack_lock_data)
-vm_offset_t stack_free_list; /* splsched only */
-unsigned int stack_free_count = 0; /* splsched only */
-unsigned int stack_free_limit = 1; /* patchable */
-
/*
- * The next field is at the base of the stack,
- * so the low end is left unsullied.
+ * We allocate kernel stacks using the slab allocator.
*/
-
-#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
+static struct kmem_cache stack_cache;
/*
* stack_alloc_try:
@@ -144,73 +136,37 @@ boolean_t stack_alloc_try(
{
vm_offset_t stack;
- stack_lock();
- stack = stack_free_list;
- if (stack != 0) {
- stack_free_list = stack_next(stack);
- stack_free_count--;
- } else {
+ stack = kmem_cache_alloc(&stack_cache);
+ assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
+
+#if MACH_DEBUG
+ if (stack)
+ stack_init(stack);
+#endif /* MACH_DEBUG */
+ if (! stack)
stack = thread->stack_privilege;
- }
- stack_unlock();
if (stack != 0) {
stack_attach(thread, stack, resume);
- counter(c_stack_alloc_hits++);
return TRUE;
} else {
- counter(c_stack_alloc_misses++);
return FALSE;
}
}
/*
- * We allocate kernel stacks using the slab allocator.
- */
-static struct kmem_cache stack_cache;
-
-/*
* stack_alloc:
*
* Allocate a kernel stack for a thread.
- * May block.
*/
kern_return_t stack_alloc(
thread_t thread,
void (*resume)(thread_t))
{
- vm_offset_t stack;
- spl_t s;
-
- /*
- * We first try the free list. It is probably empty,
- * or stack_alloc_try would have succeeded, but possibly
- * a stack was freed before the swapin thread got to us.
- */
-
- s = splsched();
- stack_lock();
- stack = stack_free_list;
- if (stack != 0) {
- stack_free_list = stack_next(stack);
- stack_free_count--;
- }
- stack_unlock();
- (void) splx(s);
-
- if (stack == 0) {
- stack = kmem_cache_alloc(&stack_cache);
- assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
- if (stack == 0)
- return KERN_RESOURCE_SHORTAGE;
-
-#if MACH_DEBUG
- stack_init(stack);
-#endif /* MACH_DEBUG */
- }
+ if (! stack_alloc_try (thread, resume))
+ return KERN_RESOURCE_SHORTAGE;
- stack_attach(thread, stack, resume);
return KERN_SUCCESS;
}
@@ -228,17 +184,8 @@ void stack_free(
stack = stack_detach(thread);
- if (stack != thread->stack_privilege) {
- stack_lock();
- stack_next(stack) = stack_free_list;
- stack_free_list = stack;
- stack_free_count += 1;
-#if MACH_COUNTERS
- if (stack_free_count > c_stack_alloc_max)
- c_stack_alloc_max = stack_free_count;
-#endif /* MACH_COUNTERS */
- stack_unlock();
- }
+ if (stack != thread->stack_privilege)
+ kmem_cache_free (&stack_cache, stack);
}
/*
@@ -250,28 +197,6 @@ void stack_free(
void stack_collect(void)
{
- vm_offset_t stack;
- spl_t s;
-
- s = splsched();
- stack_lock();
- while (stack_free_count > stack_free_limit) {
- stack = stack_free_list;
- stack_free_list = stack_next(stack);
- stack_free_count--;
- stack_unlock();
- (void) splx(s);
-
-#if MACH_DEBUG
- stack_finalize(stack);
-#endif /* MACH_DEBUG */
- kmem_cache_free(&stack_cache, stack);
-
- s = splsched();
- stack_lock();
- }
- stack_unlock();
- (void) splx(s);
}
#endif /* MACHINE_STACK */
@@ -2383,46 +2308,6 @@ void stack_finalize(
}
}
-#ifndef MACHINE_STACK
-/*
- * stack_statistics:
- *
- * Return statistics on cached kernel stacks.
- * *maxusagep must be initialized by the caller.
- */
-
-void stack_statistics(
- natural_t *totalp,
- vm_size_t *maxusagep)
-{
- spl_t s;
-
- s = splsched();
- stack_lock();
- if (stack_check_usage) {
- vm_offset_t stack;
-
- /*
- * This is pretty expensive to do at splsched,
- * but it only happens when someone makes
- * a debugging call, so it should be OK.
- */
-
- for (stack = stack_free_list; stack != 0;
- stack = stack_next(stack)) {
- vm_size_t usage = stack_usage(stack);
-
- if (usage > *maxusagep)
- *maxusagep = usage;
- }
- }
-
- *totalp = stack_free_count;
- stack_unlock();
- (void) splx(s);
-}
-#endif /* MACHINE_STACK */
-
kern_return_t host_stack_usage(
host_t host,
vm_size_t *reservedp,
@@ -2438,12 +2323,12 @@ kern_return_t host_stack_usage(
if (host == HOST_NULL)
return KERN_INVALID_HOST;
+ total = 0; /* XXX */
+
simple_lock(&stack_usage_lock);
maxusage = stack_max_usage;
simple_unlock(&stack_usage_lock);
- stack_statistics(&total, &maxusage);
-
*reservedp = 0;
*totalp = total;
*spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);