| author | Justus Winter <4winter@informatik.uni-hamburg.de> | 2015-07-03 11:48:30 +0200 |
| --- | --- | --- |
| committer | Justus Winter <4winter@informatik.uni-hamburg.de> | 2015-07-03 11:48:30 +0200 |
| commit | 5708d7fffcb79200cc9519348c4fa72611fdb693 (patch) | |
| tree | 72261fed633d73ad56fa82c7e9dbfbe13d8bd0a7 /debian | |
| parent | 834009572a2f97e29612c0832b05f5bb5f58a702 (diff) | |
add patch series
Diffstat (limited to 'debian')
| debian/patches/series | 2 |
| debian/patches/upstreamme0001-ipc-avoid-kmem_alloc.patch | 45 |
| debian/patches/upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch | 92 |
3 files changed, 139 insertions, 0 deletions
diff --git a/debian/patches/series b/debian/patches/series
index e532dda..c9dc7a4 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -13,3 +13,5 @@
 vm-cache-policy0001-VM-cache-policy-change.patch
 vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
 vm-cache-policy0003-vm-evict-clean-pages-first.patch
+upstreamme0001-ipc-avoid-kmem_alloc.patch
+upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
diff --git a/debian/patches/upstreamme0001-ipc-avoid-kmem_alloc.patch b/debian/patches/upstreamme0001-ipc-avoid-kmem_alloc.patch
new file mode 100644
index 0000000..47f0c7a
--- /dev/null
+++ b/debian/patches/upstreamme0001-ipc-avoid-kmem_alloc.patch
@@ -0,0 +1,45 @@
+From 0d1c85b580225d5f5935643e16d410dc1e5d18c6 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Thu, 2 Jul 2015 16:14:18 +0200
+Subject: [PATCH gnumach 1/2] ipc: avoid kmem_alloc
+
+* ipc/ipc_table.c (ipc_table_alloc): Unconditionally use `kalloc'.
+(ipc_table_free): Unconditionally use `kfree'.
+---
+ ipc/ipc_table.c | 15 ++-------------
+ 1 file changed, 2 insertions(+), 13 deletions(-)
+
+diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
+index 1a89d81..0f8592a 100644
+--- a/ipc/ipc_table.c
++++ b/ipc/ipc_table.c
+@@ -114,15 +114,7 @@ vm_offset_t
+ ipc_table_alloc(
+     vm_size_t size)
+ {
+-    vm_offset_t table;
+-
+-    if (size < PAGE_SIZE)
+-        table = kalloc(size);
+-    else
+-        if (kmem_alloc(kmem_map, &table, size) != KERN_SUCCESS)
+-            table = 0;
+-
+-    return table;
++    return kalloc(size);
+ }
+
+ /*
+@@ -139,8 +131,5 @@ ipc_table_free(
+     vm_size_t size,
+     vm_offset_t table)
+ {
+-    if (size < PAGE_SIZE)
+-        kfree(table, size);
+-    else
+-        kmem_free(kmem_map, table, size);
++    kfree(table, size);
+ }
+--
+2.1.4
+
diff --git a/debian/patches/upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch b/debian/patches/upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
new file mode 100644
index 0000000..d688be7
--- /dev/null
+++ b/debian/patches/upstreamme0002-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
@@ -0,0 +1,92 @@
+From c077233ee4f85a569cd05f557a1178977903b113 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Thu, 2 Jul 2015 16:20:44 +0200
+Subject: [PATCH gnumach 2/2] kern: allocate kernel stacks using the slab
+ allocator
+
+* kern/slab.c (kmem_cache_init): Relax alignment restriction.
+* kern/thread.c (stack_cache): New variable.
+(stack_alloc): Use the slab allocator.
+(stack_collect): Adjust accordingly.
+(thread_init): Initialize `stack_cache'.
+---
+ kern/slab.c   |  1 -
+ kern/thread.c | 29 +++++++++++++++++++----------
+ 2 files changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/kern/slab.c b/kern/slab.c
+index 60378b5..32469df 100644
+--- a/kern/slab.c
++++ b/kern/slab.c
+@@ -800,7 +800,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
+
+     assert(obj_size > 0);
+     assert(ISP2(align));
+-    assert(align < PAGE_SIZE);
+
+     buf_size = P2ROUND(obj_size, align);
+
+diff --git a/kern/thread.c b/kern/thread.c
+index 1f47553..36a5a92 100644
+--- a/kern/thread.c
++++ b/kern/thread.c
+@@ -165,6 +165,11 @@ boolean_t stack_alloc_try(
+ }
+
+ /*
++ * We allocate kernel stacks using the slab allocator.
++ */
++static struct kmem_cache stack_cache;
++
++/*
+  * stack_alloc:
+  *
+  *     Allocate a kernel stack for a thread.
+@@ -195,15 +200,10 @@ kern_return_t stack_alloc(
+     (void) splx(s);
+
+     if (stack == 0) {
+-        kern_return_t kr;
+-        /*
+-         * Kernel stacks should be naturally aligned,
+-         * so that it is easy to find the starting/ending
+-         * addresses of a stack given an address in the middle.
+-         */
+-        kr = kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE);
+-        if (kr != KERN_SUCCESS)
+-            return kr;
++        stack = kmem_cache_alloc(&stack_cache);
++        assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
++        if (stack == 0)
++            return KERN_RESOURCE_SHORTAGE;
+
+ #if MACH_DEBUG
+         stack_init(stack);
+@@ -265,7 +265,7 @@ void stack_collect(void)
+ #if MACH_DEBUG
+         stack_finalize(stack);
+ #endif /* MACH_DEBUG */
+-        kmem_free(kmem_map, stack, KERNEL_STACK_SIZE);
++        kmem_cache_free(&stack_cache, stack);
+
+         s = splsched();
+         stack_lock();
+@@ -301,6 +301,15 @@ void thread_init(void)
+                     NULL, NULL, NULL, 0);
+
+     /*
++     * Kernel stacks should be naturally aligned, so that it
++     * is easy to find the starting/ending addresses of a
++     * stack given an address in the middle.
++     */
++    kmem_cache_init(&stack_cache, "stack",
++                    KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
++                    NULL, NULL, NULL, 0);
++
++    /*
+      * Fill in a template thread for fast initialization.
+      * [Fields that must be (or are typically) reset at
+      *  time of creation are so noted.]
+--
+2.1.4
+
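The second patch rests on an alignment invariant: every kernel stack is KERNEL_STACK_SIZE bytes long and KERNEL_STACK_SIZE-aligned, so the base of a stack can be recovered from any address inside it by masking off the low bits. That is why the new cache is created with its alignment equal to its object size, and why the `assert(align < PAGE_SIZE)` restriction in `kmem_cache_init` has to be relaxed. The user-space sketch below only illustrates that invariant: `aligned_alloc` stands in for `kmem_cache_alloc(&stack_cache)`, the KERNEL_STACK_SIZE value is an assumption for the demo, and `stack_base_of` is a hypothetical helper rather than gnumach code.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed value for this demo only; the real constant is architecture-specific. */
#define KERNEL_STACK_SIZE ((size_t)4 * 4096)

/* A naturally aligned stack lets us find its base by masking off the low
 * bits -- the property the patch keeps by giving the slab cache an
 * alignment equal to the stack size. */
static uintptr_t stack_base_of(uintptr_t addr_in_stack)
{
    return addr_in_stack & ~((uintptr_t)KERNEL_STACK_SIZE - 1);
}

int main(void)
{
    /* aligned_alloc stands in for kmem_cache_alloc(&stack_cache). */
    uintptr_t stack = (uintptr_t)aligned_alloc(KERNEL_STACK_SIZE,
                                               KERNEL_STACK_SIZE);
    assert(stack != 0 && (stack & (KERNEL_STACK_SIZE - 1)) == 0);

    uintptr_t inside = stack + 1234;   /* e.g. a saved stack pointer */
    assert(stack_base_of(inside) == stack);

    printf("base %#lx recovered from %#lx\n",
           (unsigned long)stack, (unsigned long)inside);
    free((void *)stack);
    return 0;
}
```

Compiled as C11, the sketch allocates one aligned "stack", picks an address in its middle, and checks that masking recovers the base, the same check the patch adds with `assert((stack & (KERNEL_STACK_SIZE-1)) == 0)`.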