author    Justus Winter <4winter@informatik.uni-hamburg.de>  2015-07-04 16:54:06 +0200
committer Justus Winter <4winter@informatik.uni-hamburg.de>  2015-07-04 16:54:06 +0200
commit    93c8d6be59013bd2a6bd70c0e8c0a967d5ba69ea (patch)
tree      a3a990b489063099990007d0c3760a0519dc449e
parent    b6b0ae37b280948ce0c657e1576b5cb2a0bcf44d (diff)
add patch series
-rw-r--r--  debian/patches/series | 5
-rw-r--r--  debian/patches/upstreamme0001-vm-fix-panic-message.patch | 34
-rw-r--r--  debian/patches/upstreamme0002-kern-make-printf-handle-long-long-integers.patch | 121
-rw-r--r--  debian/patches/upstreamme0003-ipc-avoid-kmem_alloc.patch | 45
-rw-r--r--  debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch | 92
-rw-r--r--  debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch | 270
6 files changed, 567 insertions, 0 deletions
diff --git a/debian/patches/series b/debian/patches/series
index e532dda..52f649a 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -13,3 +13,8 @@ vm-cache-policy0001-VM-cache-policy-change.patch
vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
vm-cache-policy0003-vm-evict-clean-pages-first.patch
+upstreamme0001-vm-fix-panic-message.patch
+upstreamme0002-kern-make-printf-handle-long-long-integers.patch
+upstreamme0003-ipc-avoid-kmem_alloc.patch
+upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
+upstreamme0005-kern-remove-the-list-of-free-stacks.patch
diff --git a/debian/patches/upstreamme0001-vm-fix-panic-message.patch b/debian/patches/upstreamme0001-vm-fix-panic-message.patch
new file mode 100644
index 0000000..ca0300b
--- /dev/null
+++ b/debian/patches/upstreamme0001-vm-fix-panic-message.patch
@@ -0,0 +1,34 @@
+From d4c9b3b63c9f585e89bafcc83e6027c74b19a3b8 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 4 Jul 2015 16:36:03 +0200
+Subject: [PATCH gnumach 1/5] vm: fix panic message
+
+* vm/vm_kern.c (kmem_init): Fix panic message.
+---
+ vm/vm_kern.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/vm/vm_kern.c b/vm/vm_kern.c
+index 775d8e8..9c0a20b 100644
+--- a/vm/vm_kern.c
++++ b/vm/vm_kern.c
+@@ -827,7 +827,6 @@ void kmem_init(
+ /*
+ * Reserve virtual memory allocated up to this time.
+ */
+-
+ if (start != VM_MIN_KERNEL_ADDRESS) {
+ kern_return_t rc;
+ vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
+@@ -838,7 +837,7 @@ void kmem_init(
+ VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ if (rc)
+- panic("%s:%d: vm_map_enter failed (%d)\n", rc);
++ panic("vm_map_enter failed (%d)\n", rc);
+ }
+ }
+
+--
+2.1.4
+
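For clarity: the format string removed above declared three conversions (%s, %d, %d) but passed panic() only `rc', so the first two consumed garbage from the argument list. A minimal user-space sketch of the alternative fix, keeping the file/line prefix by supplying the missing arguments; the stand-in panic() below is illustrative, not gnumach code:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for the kernel's panic(): print and abort. */
    static void panic(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        abort();
    }

    int main(void)
    {
        int rc = 3;
        /* Every conversion now has a matching argument. */
        panic("%s:%d: vm_map_enter failed (%d)\n", __func__, __LINE__, rc);
    }

The patch takes the simpler route and drops the unused conversions instead.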
diff --git a/debian/patches/upstreamme0002-kern-make-printf-handle-long-long-integers.patch b/debian/patches/upstreamme0002-kern-make-printf-handle-long-long-integers.patch
new file mode 100644
index 0000000..76fa0eb
--- /dev/null
+++ b/debian/patches/upstreamme0002-kern-make-printf-handle-long-long-integers.patch
@@ -0,0 +1,121 @@
+From 81a3958304581ade78069d850261e74a9a58a014 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 4 Jul 2015 11:59:48 +0200
+Subject: [PATCH gnumach 2/5] kern: make printf handle long long integers
+
+* Makefile.am (clib_routines): Steal `__umoddi3'.
+* kern/printf.c (MAXBUF): Increase size.
+(printnum, _doprnt): Handle long long integers.
+* kern/printf.h (printnum): Adjust declaration.
+---
+ Makefile.am | 2 +-
+ kern/printf.c | 28 +++++++++++++++++++++-------
+ kern/printf.h | 2 +-
+ 3 files changed, 23 insertions(+), 9 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 913db55..76a192b 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -159,7 +159,7 @@ noinst_PROGRAMS += \
+ clib_routines := memcmp memcpy memmove \
+ strchr strstr strsep strtok \
+ htonl htons ntohl ntohs \
+- udivdi3 __udivdi3 \
++ udivdi3 __udivdi3 __umoddi3 \
+ __rel_iplt_start __rel_iplt_end \
+ __ffsdi2 \
+ _START _start etext _edata end _end # actually ld magic, not libc.
+diff --git a/kern/printf.c b/kern/printf.c
+index 1db0d08..e060b9d 100644
+--- a/kern/printf.c
++++ b/kern/printf.c
+@@ -126,11 +126,11 @@
+ #define isdigit(d) ((d) >= '0' && (d) <= '9')
+ #define Ctod(c) ((c) - '0')
+
+-#define MAXBUF (sizeof(long int) * 8) /* enough for binary */
++#define MAXBUF (sizeof(long long int) * 8) /* enough for binary */
+
+
+ void printnum(
+- unsigned long u,
++ unsigned long long u,
+ int base,
+ void (*putc)( char, vm_offset_t ),
+ vm_offset_t putc_arg)
+@@ -178,8 +178,9 @@ void _doprnt(
+ int prec;
+ boolean_t ladjust;
+ char padc;
+- long n;
+- unsigned long u;
++ long long n;
++ unsigned long long u;
++ int have_long_long = 0;
+ int plus_sign;
+ int sign_char;
+ boolean_t altfmt, truncate;
+@@ -276,6 +277,10 @@ void _doprnt(
+
+ if (c == 'l')
+ c = *++fmt; /* need it if sizeof(int) < sizeof(long) */
++ if (c == 'l') {
++ c = *++fmt; /* handle `long long' */
++ have_long_long = 1;
++ }
+
+ truncate = FALSE;
+
+@@ -287,7 +292,10 @@ void _doprnt(
+ boolean_t any;
+ int i;
+
+- u = va_arg(argp, unsigned long);
++ if (! have_long_long)
++ u = va_arg(argp, unsigned long);
++ else
++ u = va_arg(argp, unsigned long long);
+ p = va_arg(argp, char *);
+ base = *p++;
+ printnum(u, base, putc, putc_arg);
+@@ -431,7 +439,10 @@ void _doprnt(
+ goto print_unsigned;
+
+ print_signed:
+- n = va_arg(argp, long);
++ if (! have_long_long)
++ n = va_arg(argp, long);
++ else
++ n = va_arg(argp, long long);
+ if (n >= 0) {
+ u = n;
+ sign_char = plus_sign;
+@@ -443,7 +454,10 @@ void _doprnt(
+ goto print_num;
+
+ print_unsigned:
+- u = va_arg(argp, unsigned long);
++ if (! have_long_long)
++ u = va_arg(argp, unsigned long);
++ else
++ u = va_arg(argp, unsigned long long);
+ goto print_num;
+
+ print_num:
+diff --git a/kern/printf.h b/kern/printf.h
+index 86857d3..76047f0 100644
+--- a/kern/printf.h
++++ b/kern/printf.h
+@@ -35,7 +35,7 @@ extern void _doprnt (const char *fmt,
+ int radix,
+ vm_offset_t putc_arg);
+
+-extern void printnum (unsigned long u, int base,
++extern void printnum (unsigned long long u, int base,
+ void (*putc)(char, vm_offset_t),
+ vm_offset_t putc_arg);
+
+--
+2.1.4
+
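After this change, the kernel's printf family accepts the `ll' length modifier. A small hosted sketch of the behaviour the patched _doprnt provides, using standard C printf as a stand-in:

    #include <stdio.h>

    int main(void)
    {
        /* _doprnt now consumes two `l' flags, sets have_long_long,
         * and fetches a 64-bit value from the va_list.  printnum
         * extracts digits with `u % base' and `u / base'; on 32-bit
         * x86 GCC compiles those into calls to the libgcc helpers
         * __udivdi3 and __umoddi3, which is why the Makefile.am hunk
         * steals __umoddi3 alongside __udivdi3. */
        unsigned long long u = 0x123456789abcdefULL;
        printf("%llu %llx\n", u, u);
        return 0;
    }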
diff --git a/debian/patches/upstreamme0003-ipc-avoid-kmem_alloc.patch b/debian/patches/upstreamme0003-ipc-avoid-kmem_alloc.patch
new file mode 100644
index 0000000..fad62f6
--- /dev/null
+++ b/debian/patches/upstreamme0003-ipc-avoid-kmem_alloc.patch
@@ -0,0 +1,45 @@
+From 954cc1f1bacd758e3ad889419cf852f73c70b677 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Thu, 2 Jul 2015 16:14:18 +0200
+Subject: [PATCH gnumach 3/5] ipc: avoid kmem_alloc
+
+* ipc/ipc_table.c (ipc_table_alloc): Unconditionally use `kalloc'.
+(ipc_table_free): Unconditionally use `kfree'.
+---
+ ipc/ipc_table.c | 15 ++-------------
+ 1 file changed, 2 insertions(+), 13 deletions(-)
+
+diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
+index 1a89d81..0f8592a 100644
+--- a/ipc/ipc_table.c
++++ b/ipc/ipc_table.c
+@@ -114,15 +114,7 @@ vm_offset_t
+ ipc_table_alloc(
+ vm_size_t size)
+ {
+- vm_offset_t table;
+-
+- if (size < PAGE_SIZE)
+- table = kalloc(size);
+- else
+- if (kmem_alloc(kmem_map, &table, size) != KERN_SUCCESS)
+- table = 0;
+-
+- return table;
++ return kalloc(size);
+ }
+
+ /*
+@@ -139,8 +131,5 @@ ipc_table_free(
+ vm_size_t size,
+ vm_offset_t table)
+ {
+- if (size < PAGE_SIZE)
+- kfree(table, size);
+- else
+- kmem_free(kmem_map, table, size);
++ kfree(table, size);
+ }
+--
+2.1.4
+
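The size test removed here duplicated policy that belongs inside the allocator: a kalloc-style front end is expected to route small requests to a cache and large ones to page-level allocation on its own. A hypothetical sketch of that dispatch, with malloc standing in for both back ends; this is not gnumach's actual kalloc:

    #include <stddef.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    /* Illustrative stand-ins for a slab-cache allocation and a raw
     * page-level allocation. */
    static void *cache_alloc(size_t n) { return malloc(n); }
    static void *page_alloc(size_t n)  { return malloc(n); }

    /* The front end hides the small/large split, so callers such as
     * ipc_table_alloc need not branch on size. */
    void *kalloc_sketch(size_t size)
    {
        if (size < PAGE_SIZE)
            return cache_alloc(size);
        return page_alloc(size);
    }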
diff --git a/debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch b/debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
new file mode 100644
index 0000000..98a3613
--- /dev/null
+++ b/debian/patches/upstreamme0004-kern-allocate-kernel-stacks-using-the-slab-allocator.patch
@@ -0,0 +1,92 @@
+From b49fd391d6aef3df51aeaa4dc12b68c0b4b1bb1f Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Thu, 2 Jul 2015 16:20:44 +0200
+Subject: [PATCH gnumach 4/5] kern: allocate kernel stacks using the slab
+ allocator
+
+* kern/slab.c (kmem_cache_init): Relax alignment restriction.
+* kern/thread.c (stack_cache): New variable.
+(stack_alloc): Use the slab allocator.
+(stack_collect): Adjust accordingly.
+(thread_init): Initialize `stack_cache'.
+---
+ kern/slab.c | 1 -
+ kern/thread.c | 29 +++++++++++++++++++----------
+ 2 files changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/kern/slab.c b/kern/slab.c
+index 60378b5..32469df 100644
+--- a/kern/slab.c
++++ b/kern/slab.c
+@@ -800,7 +800,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
+
+ assert(obj_size > 0);
+ assert(ISP2(align));
+- assert(align < PAGE_SIZE);
+
+ buf_size = P2ROUND(obj_size, align);
+
+diff --git a/kern/thread.c b/kern/thread.c
+index 1f47553..36a5a92 100644
+--- a/kern/thread.c
++++ b/kern/thread.c
+@@ -165,6 +165,11 @@ boolean_t stack_alloc_try(
+ }
+
+ /*
++ * We allocate kernel stacks using the slab allocator.
++ */
++static struct kmem_cache stack_cache;
++
++/*
+ * stack_alloc:
+ *
+ * Allocate a kernel stack for a thread.
+@@ -195,15 +200,10 @@ kern_return_t stack_alloc(
+ (void) splx(s);
+
+ if (stack == 0) {
+- kern_return_t kr;
+- /*
+- * Kernel stacks should be naturally aligned,
+- * so that it is easy to find the starting/ending
+- * addresses of a stack given an address in the middle.
+- */
+- kr = kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE);
+- if (kr != KERN_SUCCESS)
+- return kr;
++ stack = kmem_cache_alloc(&stack_cache);
++ assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
++ if (stack == 0)
++ return KERN_RESOURCE_SHORTAGE;
+
+ #if MACH_DEBUG
+ stack_init(stack);
+@@ -265,7 +265,7 @@ void stack_collect(void)
+ #if MACH_DEBUG
+ stack_finalize(stack);
+ #endif /* MACH_DEBUG */
+- kmem_free(kmem_map, stack, KERNEL_STACK_SIZE);
++ kmem_cache_free(&stack_cache, stack);
+
+ s = splsched();
+ stack_lock();
+@@ -301,6 +301,15 @@ void thread_init(void)
+ NULL, NULL, NULL, 0);
+
+ /*
++ * Kernel stacks should be naturally aligned, so that it
++ * is easy to find the starting/ending addresses of a
++ * stack given an address in the middle.
++ */
++ kmem_cache_init(&stack_cache, "stack",
++ KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
++ NULL, NULL, NULL, 0);
++
++ /*
+ * Fill in a template thread for fast initialization.
+ * [Fields that must be (or are typically) reset at
+ * time of creation are so noted.]
+--
+2.1.4
+
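The new assert on `stack & (KERNEL_STACK_SIZE-1)' enforces the natural alignment that the moved comment describes: with an aligned, power-of-two-sized stack, any address inside it maps back to the stack base with a single mask. A self-contained sketch of that arithmetic; the size constant is illustrative:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KERNEL_STACK_SIZE (1 << 12)   /* illustrative power of two */

    /* Clearing the low bits of any interior address recovers the
     * base of a naturally aligned stack. */
    static uintptr_t stack_base(uintptr_t addr)
    {
        return addr & ~(uintptr_t)(KERNEL_STACK_SIZE - 1);
    }

    int main(void)
    {
        uintptr_t base = 0x100000;        /* KERNEL_STACK_SIZE-aligned */
        assert((base & (KERNEL_STACK_SIZE - 1)) == 0);
        /* An address 0x2a0 bytes into the stack maps back to base. */
        printf("%#lx\n", (unsigned long)stack_base(base + 0x2a0));
        return 0;
    }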
diff --git a/debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch b/debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch
new file mode 100644
index 0000000..b1727c4
--- /dev/null
+++ b/debian/patches/upstreamme0005-kern-remove-the-list-of-free-stacks.patch
@@ -0,0 +1,270 @@
+From 13a4d09db07487d56ddc0667779cb8e0fe85a912 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Fri, 3 Jul 2015 12:41:48 +0200
+Subject: [PATCH gnumach 5/5] kern: remove the list of free stacks
+
+* kern/counters.c: Remove relevant counters.
+* kern/counters.h: Likewise.
+* kern/thread.c (stack_free_{list,count,limit}): Drop variables.
+(stack_next): Remove macro.
+(stack_alloc_try): Allocate stack using the slab allocator.
+(stack_alloc): Merely call `stack_alloc_try'.
+(stack_free): Adapt accordingly.
+---
+ kern/counters.c | 3 --
+ kern/counters.h | 3 --
+ kern/thread.c | 145 ++++++--------------------------------------------------
+ 3 files changed, 14 insertions(+), 137 deletions(-)
+
+diff --git a/kern/counters.c b/kern/counters.c
+index 0a0665b..74fd42d 100644
+--- a/kern/counters.c
++++ b/kern/counters.c
+@@ -46,9 +46,6 @@ mach_counter_t c_stacks_current = 0;
+ mach_counter_t c_stacks_max = 0;
+ mach_counter_t c_stacks_min = 0;
+ mach_counter_t c_stacks_total = 0;
+-mach_counter_t c_stack_alloc_hits = 0;
+-mach_counter_t c_stack_alloc_misses = 0;
+-mach_counter_t c_stack_alloc_max = 0;
+ mach_counter_t c_clock_ticks = 0;
+ mach_counter_t c_ipc_mqueue_send_block = 0;
+ mach_counter_t c_ipc_mqueue_receive_block_user = 0;
+diff --git a/kern/counters.h b/kern/counters.h
+index aa1e739..bfa9b44 100644
+--- a/kern/counters.h
++++ b/kern/counters.h
+@@ -69,9 +69,6 @@ extern mach_counter_t c_stacks_current;
+ extern mach_counter_t c_stacks_max;
+ extern mach_counter_t c_stacks_min;
+ extern mach_counter_t c_stacks_total;
+-extern mach_counter_t c_stack_alloc_hits;
+-extern mach_counter_t c_stack_alloc_misses;
+-extern mach_counter_t c_stack_alloc_max;
+ extern mach_counter_t c_clock_ticks;
+ extern mach_counter_t c_ipc_mqueue_send_block;
+ extern mach_counter_t c_ipc_mqueue_receive_block_user;
+diff --git a/kern/thread.c b/kern/thread.c
+index 36a5a92..f6e3021 100644
+--- a/kern/thread.c
++++ b/kern/thread.c
+@@ -101,8 +101,6 @@ vm_size_t stack_max_usage = 0;
+ * stack_free
+ * stack_handoff
+ * stack_collect
+- * and if MACH_DEBUG:
+- * stack_statistics
+ */
+ #else /* MACHINE_STACK */
+ /*
+@@ -120,16 +118,10 @@ decl_simple_lock_data(, stack_lock_data)/* splsched only */
+ #define stack_lock() simple_lock(&stack_lock_data)
+ #define stack_unlock() simple_unlock(&stack_lock_data)
+
+-vm_offset_t stack_free_list; /* splsched only */
+-unsigned int stack_free_count = 0; /* splsched only */
+-unsigned int stack_free_limit = 1; /* patchable */
+-
+ /*
+- * The next field is at the base of the stack,
+- * so the low end is left unsullied.
++ * We allocate kernel stacks using the slab allocator.
+ */
+-
+-#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
++static struct kmem_cache stack_cache;
+
+ /*
+ * stack_alloc_try:
+@@ -144,73 +136,37 @@ boolean_t stack_alloc_try(
+ {
+ vm_offset_t stack;
+
+- stack_lock();
+- stack = stack_free_list;
+- if (stack != 0) {
+- stack_free_list = stack_next(stack);
+- stack_free_count--;
+- } else {
++ stack = kmem_cache_alloc(&stack_cache);
++ assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
++
++#if MACH_DEBUG
++ if (stack)
++ stack_init(stack);
++#endif /* MACH_DEBUG */
++ if (! stack)
+ stack = thread->stack_privilege;
+- }
+- stack_unlock();
+
+ if (stack != 0) {
+ stack_attach(thread, stack, resume);
+- counter(c_stack_alloc_hits++);
+ return TRUE;
+ } else {
+- counter(c_stack_alloc_misses++);
+ return FALSE;
+ }
+ }
+
+ /*
+- * We allocate kernel stacks using the slab allocator.
+- */
+-static struct kmem_cache stack_cache;
+-
+-/*
+ * stack_alloc:
+ *
+ * Allocate a kernel stack for a thread.
+- * May block.
+ */
+
+ kern_return_t stack_alloc(
+ thread_t thread,
+ void (*resume)(thread_t))
+ {
+- vm_offset_t stack;
+- spl_t s;
+-
+- /*
+- * We first try the free list. It is probably empty,
+- * or stack_alloc_try would have succeeded, but possibly
+- * a stack was freed before the swapin thread got to us.
+- */
+-
+- s = splsched();
+- stack_lock();
+- stack = stack_free_list;
+- if (stack != 0) {
+- stack_free_list = stack_next(stack);
+- stack_free_count--;
+- }
+- stack_unlock();
+- (void) splx(s);
+-
+- if (stack == 0) {
+- stack = kmem_cache_alloc(&stack_cache);
+- assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
+- if (stack == 0)
+- return KERN_RESOURCE_SHORTAGE;
+-
+-#if MACH_DEBUG
+- stack_init(stack);
+-#endif /* MACH_DEBUG */
+- }
++ if (! stack_alloc_try (thread, resume))
++ return KERN_RESOURCE_SHORTAGE;
+
+- stack_attach(thread, stack, resume);
+ return KERN_SUCCESS;
+ }
+
+@@ -228,17 +184,8 @@ void stack_free(
+
+ stack = stack_detach(thread);
+
+- if (stack != thread->stack_privilege) {
+- stack_lock();
+- stack_next(stack) = stack_free_list;
+- stack_free_list = stack;
+- stack_free_count += 1;
+-#if MACH_COUNTERS
+- if (stack_free_count > c_stack_alloc_max)
+- c_stack_alloc_max = stack_free_count;
+-#endif /* MACH_COUNTERS */
+- stack_unlock();
+- }
++ if (stack != thread->stack_privilege)
++ kmem_cache_free (&stack_cache, stack);
+ }
+
+ /*
+@@ -250,28 +197,6 @@ void stack_free(
+
+ void stack_collect(void)
+ {
+- vm_offset_t stack;
+- spl_t s;
+-
+- s = splsched();
+- stack_lock();
+- while (stack_free_count > stack_free_limit) {
+- stack = stack_free_list;
+- stack_free_list = stack_next(stack);
+- stack_free_count--;
+- stack_unlock();
+- (void) splx(s);
+-
+-#if MACH_DEBUG
+- stack_finalize(stack);
+-#endif /* MACH_DEBUG */
+- kmem_cache_free(&stack_cache, stack);
+-
+- s = splsched();
+- stack_lock();
+- }
+- stack_unlock();
+- (void) splx(s);
+ }
+ #endif /* MACHINE_STACK */
+
+@@ -2382,46 +2307,6 @@ void stack_finalize(
+ }
+ }
+
+-#ifndef MACHINE_STACK
+-/*
+- * stack_statistics:
+- *
+- * Return statistics on cached kernel stacks.
+- * *maxusagep must be initialized by the caller.
+- */
+-
+-void stack_statistics(
+- natural_t *totalp,
+- vm_size_t *maxusagep)
+-{
+- spl_t s;
+-
+- s = splsched();
+- stack_lock();
+- if (stack_check_usage) {
+- vm_offset_t stack;
+-
+- /*
+- * This is pretty expensive to do at splsched,
+- * but it only happens when someone makes
+- * a debugging call, so it should be OK.
+- */
+-
+- for (stack = stack_free_list; stack != 0;
+- stack = stack_next(stack)) {
+- vm_size_t usage = stack_usage(stack);
+-
+- if (usage > *maxusagep)
+- *maxusagep = usage;
+- }
+- }
+-
+- *totalp = stack_free_count;
+- stack_unlock();
+- (void) splx(s);
+-}
+-#endif /* MACHINE_STACK */
+-
+ kern_return_t host_stack_usage(
+ host_t host,
+ vm_size_t *reservedp,
+@@ -2441,8 +2326,6 @@ kern_return_t host_stack_usage(
+ maxusage = stack_max_usage;
+ simple_unlock(&stack_usage_lock);
+
+- stack_statistics(&total, &maxusage);
+-
+ *reservedp = 0;
+ *totalp = total;
+ *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
+--
+2.1.4
+
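For context, the machinery deleted by this patch was an intrusive LIFO free list whose link word lived inside each free stack itself (the removed stack_next() macro kept it in the stack's top word); the slab cache introduced by the previous patch already caches freed stacks, which is why stack_collect can be emptied. A user-space sketch of the removed pattern, with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    static void *free_list;   /* head of the intrusive list */

    /* The link word is stored in the free block itself (gnumach kept
     * it at the block's high end rather than its start). */
    static void push(void *stack)
    {
        *(void **)stack = free_list;
        free_list = stack;
    }

    static void *pop(void)
    {
        void *stack = free_list;
        if (stack != NULL)
            free_list = *(void **)stack;
        return stack;
    }

    int main(void)
    {
        void *a = malloc(64), *b = malloc(64);
        push(a);
        push(b);
        void *first = pop();
        void *second = pop();
        printf("%p %p\n", first, second);  /* LIFO order: b, then a */
        free(a);
        free(b);
        return 0;
    }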