author    Justus Winter <4winter@informatik.uni-hamburg.de>  2015-07-04 16:54:05 +0200
committer Justus Winter <4winter@informatik.uni-hamburg.de>  2015-07-04 16:54:05 +0200
commit    b6b0ae37b280948ce0c657e1576b5cb2a0bcf44d (patch)
tree      2242fdcafda7844896a86bbc6ecad159e678cf6c /debian
parent    033eabe640d482913e6a995ffd3bdc21de7e7a5a (diff)
drop old patch series
Diffstat (limited to 'debian')
-rw-r--r--  debian/patches/series                                                                     |   3 -
-rw-r--r--  debian/patches/upstreamme0001-ipc-avoid-kmem_alloc.patch                                  |  45 ----
-rw-r--r--  debian/patches/upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch  |  92 ----
-rw-r--r--  debian/patches/upstreamme0003-xxx-drop-free-list.patch                                    | 263 ----
4 files changed, 0 insertions, 403 deletions
diff --git a/debian/patches/series b/debian/patches/series
index 5b24d4d..e532dda 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -13,6 +13,3 @@ vm-cache-policy0001-VM-cache-policy-change.patch
vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
vm-cache-policy0003-vm-evict-clean-pages-first.patch
-upstreamme0001-ipc-avoid-kmem_alloc.patch
-upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
-upstreamme0003-xxx-drop-free-list.patch
diff --git a/debian/patches/upstreamme0001-ipc-avoid-kmem_alloc.patch b/debian/patches/upstreamme0001-ipc-avoid-kmem_alloc.patch
deleted file mode 100644
index 3b6c89d..0000000
--- a/debian/patches/upstreamme0001-ipc-avoid-kmem_alloc.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From 0d1c85b580225d5f5935643e16d410dc1e5d18c6 Mon Sep 17 00:00:00 2001
-From: Justus Winter <4winter@informatik.uni-hamburg.de>
-Date: Thu, 2 Jul 2015 16:14:18 +0200
-Subject: [PATCH gnumach 1/3] ipc: avoid kmem_alloc
-
-* ipc/ipc_table.c (ipc_table_alloc): Unconditionally use `kalloc'.
-(ipc_table_free): Unconditionally use `kfree'.
----
- ipc/ipc_table.c | 15 ++-------------
- 1 file changed, 2 insertions(+), 13 deletions(-)
-
-diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
-index 1a89d81..0f8592a 100644
---- a/ipc/ipc_table.c
-+++ b/ipc/ipc_table.c
-@@ -114,15 +114,7 @@ vm_offset_t
- ipc_table_alloc(
- vm_size_t size)
- {
-- vm_offset_t table;
--
-- if (size < PAGE_SIZE)
-- table = kalloc(size);
-- else
-- if (kmem_alloc(kmem_map, &table, size) != KERN_SUCCESS)
-- table = 0;
--
-- return table;
-+ return kalloc(size);
- }
-
- /*
-@@ -139,8 +131,5 @@ ipc_table_free(
- vm_size_t size,
- vm_offset_t table)
- {
-- if (size < PAGE_SIZE)
-- kfree(table, size);
-- else
-- kmem_free(kmem_map, table, size);
-+ kfree(table, size);
- }
---
-2.1.4
-
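[Note on the patch above: it collapsed ipc_table_alloc's size-dependent choice between `kalloc' and `kmem_alloc' into an unconditional `kalloc'. The sketch below models that before/after logic in userspace; PAGE_SIZE, kalloc and kfree are malloc-backed stand-ins, not the gnumach primitives, and the point is only that one code path now serves all sizes.]

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Stand-in allocators.  In gnumach, kalloc can satisfy requests of any
 * size, which is what makes the unconditional call safe. */
static void *kalloc(size_t size)       { return malloc(size); }
static void  kfree(void *p, size_t sz) { (void)sz; free(p); }

/* Old behaviour: small tables from kalloc, page-sized or larger tables
 * from kmem_alloc (modelled here with plain malloc). */
static void *table_alloc_old(size_t size)
{
	if (size < PAGE_SIZE)
		return kalloc(size);
	return malloc(size);	/* kmem_alloc stand-in */
}

/* New behaviour after the patch: one code path for every size. */
static void *table_alloc_new(size_t size)
{
	return kalloc(size);
}

int main(void)
{
	void *old_way = table_alloc_old(4 * PAGE_SIZE);
	void *new_way = table_alloc_new(4 * PAGE_SIZE);
	printf("old=%p new=%p\n", old_way, new_way);
	free(old_way);
	kfree(new_way, 4 * PAGE_SIZE);
	return 0;
}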
diff --git a/debian/patches/upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch b/debian/patches/upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
deleted file mode 100644
index a611deb..0000000
--- a/debian/patches/upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-From c077233ee4f85a569cd05f557a1178977903b113 Mon Sep 17 00:00:00 2001
-From: Justus Winter <4winter@informatik.uni-hamburg.de>
-Date: Thu, 2 Jul 2015 16:20:44 +0200
-Subject: [PATCH gnumach 2/3] kern: allocate kernel stacks using the slab
- allocator
-
-* kern/slab.c (kmem_cache_init): Relax alignment restriction.
-* kern/thread.c (stack_cache): New variable.
-(stack_alloc): Use the slab allocator.
-(stack_collect): Adjust accordingly.
-(thread_init): Initialize `stack_cache'.
----
- kern/slab.c | 1 -
- kern/thread.c | 29 +++++++++++++++++++----------
- 2 files changed, 19 insertions(+), 11 deletions(-)
-
-diff --git a/kern/slab.c b/kern/slab.c
-index 60378b5..32469df 100644
---- a/kern/slab.c
-+++ b/kern/slab.c
-@@ -800,7 +800,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
-
- assert(obj_size > 0);
- assert(ISP2(align));
-- assert(align < PAGE_SIZE);
-
- buf_size = P2ROUND(obj_size, align);
-
-diff --git a/kern/thread.c b/kern/thread.c
-index 1f47553..36a5a92 100644
---- a/kern/thread.c
-+++ b/kern/thread.c
-@@ -165,6 +165,11 @@ boolean_t stack_alloc_try(
- }
-
- /*
-+ * We allocate kernel stacks using the slab allocator.
-+ */
-+static struct kmem_cache stack_cache;
-+
-+/*
- * stack_alloc:
- *
- * Allocate a kernel stack for a thread.
-@@ -195,15 +200,10 @@ kern_return_t stack_alloc(
- (void) splx(s);
-
- if (stack == 0) {
-- kern_return_t kr;
-- /*
-- * Kernel stacks should be naturally aligned,
-- * so that it is easy to find the starting/ending
-- * addresses of a stack given an address in the middle.
-- */
-- kr = kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE);
-- if (kr != KERN_SUCCESS)
-- return kr;
-+ stack = kmem_cache_alloc(&stack_cache);
-+ assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
-+ if (stack == 0)
-+ return KERN_RESOURCE_SHORTAGE;
-
- #if MACH_DEBUG
- stack_init(stack);
-@@ -265,7 +265,7 @@ void stack_collect(void)
- #if MACH_DEBUG
- stack_finalize(stack);
- #endif /* MACH_DEBUG */
-- kmem_free(kmem_map, stack, KERNEL_STACK_SIZE);
-+ kmem_cache_free(&stack_cache, stack);
-
- s = splsched();
- stack_lock();
-@@ -301,6 +301,15 @@ void thread_init(void)
- NULL, NULL, NULL, 0);
-
- /*
-+ * Kernel stacks should be naturally aligned, so that it
-+ * is easy to find the starting/ending addresses of a
-+ * stack given an address in the middle.
-+ */
-+ kmem_cache_init(&stack_cache, "stack",
-+ KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
-+ NULL, NULL, NULL, 0);
-+
-+ /*
- * Fill in a template thread for fast initialization.
- * [Fields that must be (or are typically) reset at
- * time of creation are so noted.]
---
-2.1.4
-
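[Note on the patch above: it carried the rationale for naturally aligned stacks, namely that with a stack aligned to its own power-of-two size, the base and end are recoverable from any interior address by masking. A minimal userspace illustration follows; the KERNEL_STACK_SIZE value here is invented for the example, and only its power-of-two property is used.]

#include <stdint.h>
#include <stdio.h>

#define KERNEL_STACK_SIZE (4 * 4096)	/* power of two, per the patch */

/* With natural alignment, masking off the low bits of any address
 * inside the stack yields the stack's base. */
static uintptr_t stack_base(uintptr_t addr)
{
	return addr & ~((uintptr_t)KERNEL_STACK_SIZE - 1);
}

int main(void)
{
	uintptr_t middle = 0x100000 + 1234;	/* address inside a stack */
	uintptr_t base   = stack_base(middle);
	printf("base=0x%lx end=0x%lx\n",
	       (unsigned long)base,
	       (unsigned long)(base + KERNEL_STACK_SIZE));
	return 0;
}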
diff --git a/debian/patches/upstreamme0003-xxx-drop-free-list.patch b/debian/patches/upstreamme0003-xxx-drop-free-list.patch
deleted file mode 100644
index 85d1cc1..0000000
--- a/debian/patches/upstreamme0003-xxx-drop-free-list.patch
+++ /dev/null
@@ -1,263 +0,0 @@
-From 80766eca8172ebb99c6395f8458693c1647bca93 Mon Sep 17 00:00:00 2001
-From: Justus Winter <4winter@informatik.uni-hamburg.de>
-Date: Fri, 3 Jul 2015 12:41:48 +0200
-Subject: [PATCH gnumach 3/3] xxx drop free list
-
----
- kern/counters.c | 3 --
- kern/counters.h | 3 --
- kern/thread.c | 145 ++++++--------------------------------------------------
- 3 files changed, 14 insertions(+), 137 deletions(-)
-
-diff --git a/kern/counters.c b/kern/counters.c
-index 0a0665b..74fd42d 100644
---- a/kern/counters.c
-+++ b/kern/counters.c
-@@ -46,9 +46,6 @@ mach_counter_t c_stacks_current = 0;
- mach_counter_t c_stacks_max = 0;
- mach_counter_t c_stacks_min = 0;
- mach_counter_t c_stacks_total = 0;
--mach_counter_t c_stack_alloc_hits = 0;
--mach_counter_t c_stack_alloc_misses = 0;
--mach_counter_t c_stack_alloc_max = 0;
- mach_counter_t c_clock_ticks = 0;
- mach_counter_t c_ipc_mqueue_send_block = 0;
- mach_counter_t c_ipc_mqueue_receive_block_user = 0;
-diff --git a/kern/counters.h b/kern/counters.h
-index aa1e739..bfa9b44 100644
---- a/kern/counters.h
-+++ b/kern/counters.h
-@@ -69,9 +69,6 @@ extern mach_counter_t c_stacks_current;
- extern mach_counter_t c_stacks_max;
- extern mach_counter_t c_stacks_min;
- extern mach_counter_t c_stacks_total;
--extern mach_counter_t c_stack_alloc_hits;
--extern mach_counter_t c_stack_alloc_misses;
--extern mach_counter_t c_stack_alloc_max;
- extern mach_counter_t c_clock_ticks;
- extern mach_counter_t c_ipc_mqueue_send_block;
- extern mach_counter_t c_ipc_mqueue_receive_block_user;
-diff --git a/kern/thread.c b/kern/thread.c
-index 36a5a92..f6e3021 100644
---- a/kern/thread.c
-+++ b/kern/thread.c
-@@ -101,8 +101,6 @@ vm_size_t stack_max_usage = 0;
- * stack_free
- * stack_handoff
- * stack_collect
-- * and if MACH_DEBUG:
-- * stack_statistics
- */
- #else /* MACHINE_STACK */
- /*
-@@ -120,16 +118,10 @@ decl_simple_lock_data(, stack_lock_data)/* splsched only */
- #define stack_lock() simple_lock(&stack_lock_data)
- #define stack_unlock() simple_unlock(&stack_lock_data)
-
--vm_offset_t stack_free_list; /* splsched only */
--unsigned int stack_free_count = 0; /* splsched only */
--unsigned int stack_free_limit = 1; /* patchable */
--
- /*
-- * The next field is at the base of the stack,
-- * so the low end is left unsullied.
-+ * We allocate kernel stacks using the slab allocator.
- */
--
--#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
-+static struct kmem_cache stack_cache;
-
- /*
- * stack_alloc_try:
-@@ -144,73 +136,37 @@ boolean_t stack_alloc_try(
- {
- vm_offset_t stack;
-
-- stack_lock();
-- stack = stack_free_list;
-- if (stack != 0) {
-- stack_free_list = stack_next(stack);
-- stack_free_count--;
-- } else {
-+ stack = kmem_cache_alloc(&stack_cache);
-+ assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
-+
-+#if MACH_DEBUG
-+ if (stack)
-+ stack_init(stack);
-+#endif /* MACH_DEBUG */
-+ if (! stack)
- stack = thread->stack_privilege;
-- }
-- stack_unlock();
-
- if (stack != 0) {
- stack_attach(thread, stack, resume);
-- counter(c_stack_alloc_hits++);
- return TRUE;
- } else {
-- counter(c_stack_alloc_misses++);
- return FALSE;
- }
- }
-
- /*
-- * We allocate kernel stacks using the slab allocator.
-- */
--static struct kmem_cache stack_cache;
--
--/*
- * stack_alloc:
- *
- * Allocate a kernel stack for a thread.
-- * May block.
- */
-
- kern_return_t stack_alloc(
- thread_t thread,
- void (*resume)(thread_t))
- {
-- vm_offset_t stack;
-- spl_t s;
--
-- /*
-- * We first try the free list. It is probably empty,
-- * or stack_alloc_try would have succeeded, but possibly
-- * a stack was freed before the swapin thread got to us.
-- */
--
-- s = splsched();
-- stack_lock();
-- stack = stack_free_list;
-- if (stack != 0) {
-- stack_free_list = stack_next(stack);
-- stack_free_count--;
-- }
-- stack_unlock();
-- (void) splx(s);
--
-- if (stack == 0) {
-- stack = kmem_cache_alloc(&stack_cache);
-- assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
-- if (stack == 0)
-- return KERN_RESOURCE_SHORTAGE;
--
--#if MACH_DEBUG
-- stack_init(stack);
--#endif /* MACH_DEBUG */
-- }
-+ if (! stack_alloc_try (thread, resume))
-+ return KERN_RESOURCE_SHORTAGE;
-
-- stack_attach(thread, stack, resume);
- return KERN_SUCCESS;
- }
-
-@@ -228,17 +184,8 @@ void stack_free(
-
- stack = stack_detach(thread);
-
-- if (stack != thread->stack_privilege) {
-- stack_lock();
-- stack_next(stack) = stack_free_list;
-- stack_free_list = stack;
-- stack_free_count += 1;
--#if MACH_COUNTERS
-- if (stack_free_count > c_stack_alloc_max)
-- c_stack_alloc_max = stack_free_count;
--#endif /* MACH_COUNTERS */
-- stack_unlock();
-- }
-+ if (stack != thread->stack_privilege)
-+ kmem_cache_free (&stack_cache, stack);
- }
-
- /*
-@@ -250,28 +197,6 @@ void stack_free(
-
- void stack_collect(void)
- {
-- vm_offset_t stack;
-- spl_t s;
--
-- s = splsched();
-- stack_lock();
-- while (stack_free_count > stack_free_limit) {
-- stack = stack_free_list;
-- stack_free_list = stack_next(stack);
-- stack_free_count--;
-- stack_unlock();
-- (void) splx(s);
--
--#if MACH_DEBUG
-- stack_finalize(stack);
--#endif /* MACH_DEBUG */
-- kmem_cache_free(&stack_cache, stack);
--
-- s = splsched();
-- stack_lock();
-- }
-- stack_unlock();
-- (void) splx(s);
- }
- #endif /* MACHINE_STACK */
-
-@@ -2382,46 +2307,6 @@ void stack_finalize(
- }
- }
-
--#ifndef MACHINE_STACK
--/*
-- * stack_statistics:
-- *
-- * Return statistics on cached kernel stacks.
-- * *maxusagep must be initialized by the caller.
-- */
--
--void stack_statistics(
-- natural_t *totalp,
-- vm_size_t *maxusagep)
--{
-- spl_t s;
--
-- s = splsched();
-- stack_lock();
-- if (stack_check_usage) {
-- vm_offset_t stack;
--
-- /*
-- * This is pretty expensive to do at splsched,
-- * but it only happens when someone makes
-- * a debugging call, so it should be OK.
-- */
--
-- for (stack = stack_free_list; stack != 0;
-- stack = stack_next(stack)) {
-- vm_size_t usage = stack_usage(stack);
--
-- if (usage > *maxusagep)
-- *maxusagep = usage;
-- }
-- }
--
-- *totalp = stack_free_count;
-- stack_unlock();
-- (void) splx(s);
--}
--#endif /* MACHINE_STACK */
--
- kern_return_t host_stack_usage(
- host_t host,
- vm_size_t *reservedp,
-@@ -2441,8 +2326,6 @@ kern_return_t host_stack_usage(
- maxusage = stack_max_usage;
- simple_unlock(&stack_usage_lock);
-
-- stack_statistics(&total, &maxusage);
--
- *reservedp = 0;
- *totalp = total;
- *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
---
-2.1.4
-
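[Note on the patch above: it removed the hand-rolled stack_free_list on the grounds that the slab allocator already caches freed objects per cache. The toy model below, with struct kmem_cache reduced to a one-slot cache (not the real gnumach structure), shows why stack_free can hand stacks straight back to kmem_cache_free and still get cheap reuse on the next kmem_cache_alloc.]

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct kmem_cache: the real slab allocator keeps
 * per-cache lists of free objects, which is what made the separate
 * stack_free_list redundant.  Here the cache holds at most one object. */
struct kmem_cache {
	void  *free_obj;
	size_t obj_size;
};

static void *kmem_cache_alloc(struct kmem_cache *c)
{
	if (c->free_obj != NULL) {	/* cache hit: reuse a freed stack */
		void *obj = c->free_obj;
		c->free_obj = NULL;
		return obj;
	}
	return malloc(c->obj_size);	/* cache miss: fresh memory */
}

static void kmem_cache_free(struct kmem_cache *c, void *obj)
{
	if (c->free_obj == NULL)
		c->free_obj = obj;	/* keep the object cached */
	else
		free(obj);
}

int main(void)
{
	struct kmem_cache stack_cache = { NULL, 16 * 4096 };

	void *a = kmem_cache_alloc(&stack_cache);
	kmem_cache_free(&stack_cache, a);		/* goes into the cache */
	void *b = kmem_cache_alloc(&stack_cache);	/* comes back out */
	printf("stack reused: %s\n", a == b ? "yes" : "no");

	free(b);
	return 0;
}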