path: root/debian/patches/upstreamme0010-kern-remove-the-list-of-free-stacks.patch
From 944e8baf11a1a89c9516a47492b3c8eadb15d11a Mon Sep 17 00:00:00 2001
From: Justus Winter <4winter@informatik.uni-hamburg.de>
Date: Fri, 3 Jul 2015 12:41:48 +0200
Subject: [PATCH gnumach 10/10] kern: remove the list of free stacks

* kern/counters.c: Remove relevant counters.
* kern/counters.h: Likewise.
* kern/thread.c (stack_free_{list,count,limit}): Drop variables.
(stack_next): Remove macro.
(stack_alloc_try): Allocate stack using the slab allocator.
(stack_alloc): Merely call `stack_alloc_try'.
(stack_free): Adapt accordingly.
---
 kern/counters.c |   3 --
 kern/counters.h |   3 --
 kern/thread.c   | 145 ++++++--------------------------------------------------
 3 files changed, 14 insertions(+), 137 deletions(-)
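
Note on the mechanism being removed: the old free list threaded its
link through the topmost word of each stack (see the removed
`stack_next' macro below), so the low end of the stack was left
untouched. The following stand-alone C sketch shows that mechanism
outside the kernel; KERNEL_STACK_SIZE is an assumed value and
aligned_alloc stands in for the kernel allocator, both illustration
choices rather than gnumach's actual definitions:

/*
 * Sketch only: the pre-patch free-list mechanism, in user space.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define KERNEL_STACK_SIZE 4096		/* assumed for the sketch */
typedef uintptr_t vm_offset_t;

/* The link lives in the last word of the stack, so the low end
   is left unsullied, as the removed comment in kern/thread.c put it. */
#define stack_next(stack) \
	(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))

static vm_offset_t stack_free_list = 0;
static unsigned int stack_free_count = 0;

static void push_stack(vm_offset_t stack)
{
	stack_next(stack) = stack_free_list;
	stack_free_list = stack;
	stack_free_count++;
}

static vm_offset_t pop_stack(void)
{
	vm_offset_t stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	return stack;
}

int main(void)
{
	vm_offset_t s = (vm_offset_t)
		aligned_alloc(KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
	assert(s != 0);
	/* Same power-of-two alignment check the patched code asserts. */
	assert((s & (KERNEL_STACK_SIZE - 1)) == 0);
	push_stack(s);
	assert(pop_stack() == s && stack_free_count == 0);
	free((void *)s);
	return 0;
}

The slab allocator keeps its own cache of free objects, so
maintaining this second list on top of it duplicated work; dropping
it also lets stack_statistics go, since that function existed only
to walk the free list.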

diff --git a/kern/counters.c b/kern/counters.c
index 0a0665b..74fd42d 100644
--- a/kern/counters.c
+++ b/kern/counters.c
@@ -46,9 +46,6 @@ mach_counter_t c_stacks_current = 0;
 mach_counter_t c_stacks_max = 0;
 mach_counter_t c_stacks_min = 0;
 mach_counter_t c_stacks_total = 0;
-mach_counter_t c_stack_alloc_hits = 0;
-mach_counter_t c_stack_alloc_misses = 0;
-mach_counter_t c_stack_alloc_max = 0;
 mach_counter_t c_clock_ticks = 0;
 mach_counter_t c_ipc_mqueue_send_block = 0;
 mach_counter_t c_ipc_mqueue_receive_block_user = 0;
diff --git a/kern/counters.h b/kern/counters.h
index aa1e739..bfa9b44 100644
--- a/kern/counters.h
+++ b/kern/counters.h
@@ -69,9 +69,6 @@ extern mach_counter_t c_stacks_current;
 extern mach_counter_t c_stacks_max;
 extern mach_counter_t c_stacks_min;
 extern mach_counter_t c_stacks_total;
-extern mach_counter_t c_stack_alloc_hits;
-extern mach_counter_t c_stack_alloc_misses;
-extern mach_counter_t c_stack_alloc_max;
 extern mach_counter_t c_clock_ticks;
 extern mach_counter_t c_ipc_mqueue_send_block;
 extern mach_counter_t c_ipc_mqueue_receive_block_user;
diff --git a/kern/thread.c b/kern/thread.c
index 36a5a92..f6e3021 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -101,8 +101,6 @@ vm_size_t		stack_max_usage = 0;
  *		stack_free
  *		stack_handoff
  *		stack_collect
- *	and if MACH_DEBUG:
- *		stack_statistics
  */
 #else	/* MACHINE_STACK */
 /*
@@ -120,16 +118,10 @@ decl_simple_lock_data(, stack_lock_data)/* splsched only */
 #define stack_lock()	simple_lock(&stack_lock_data)
 #define stack_unlock()	simple_unlock(&stack_lock_data)
 
-vm_offset_t stack_free_list;		/* splsched only */
-unsigned int stack_free_count = 0;	/* splsched only */
-unsigned int stack_free_limit = 1;	/* patchable */
-
 /*
- *	The next field is at the base of the stack,
- *	so the low end is left unsullied.
+ * We allocate kernel stacks using the slab allocator.
  */
-
-#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
+static struct kmem_cache stack_cache;
 
 /*
  *	stack_alloc_try:
@@ -144,73 +136,37 @@ boolean_t stack_alloc_try(
 {
 	vm_offset_t stack;
 
-	stack_lock();
-	stack = stack_free_list;
-	if (stack != 0) {
-		stack_free_list = stack_next(stack);
-		stack_free_count--;
-	} else {
+	stack = kmem_cache_alloc(&stack_cache);
+	assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
+
+#if	MACH_DEBUG
+	if (stack)
+		stack_init(stack);
+#endif	/* MACH_DEBUG */
+	if (! stack)
 		stack = thread->stack_privilege;
-	}
-	stack_unlock();
 
 	if (stack != 0) {
 		stack_attach(thread, stack, resume);
-		counter(c_stack_alloc_hits++);
 		return TRUE;
 	} else {
-		counter(c_stack_alloc_misses++);
 		return FALSE;
 	}
 }
 
 /*
- * We allocate kernel stacks using the slab allocator.
- */
-static struct kmem_cache stack_cache;
-
-/*
  *	stack_alloc:
  *
  *	Allocate a kernel stack for a thread.
- *	May block.
  */
 
 kern_return_t stack_alloc(
 	thread_t	thread,
 	void		(*resume)(thread_t))
 {
-	vm_offset_t stack;
-	spl_t s;
-
-	/*
-	 *	We first try the free list.  It is probably empty,
-	 *	or stack_alloc_try would have succeeded, but possibly
-	 *	a stack was freed before the swapin thread got to us.
-	 */
-
-	s = splsched();
-	stack_lock();
-	stack = stack_free_list;
-	if (stack != 0) {
-		stack_free_list = stack_next(stack);
-		stack_free_count--;
-	}
-	stack_unlock();
-	(void) splx(s);
-
-	if (stack == 0) {
-		stack = kmem_cache_alloc(&stack_cache);
-		assert ((stack & (KERNEL_STACK_SIZE-1)) == 0);
-		if (stack == 0)
-			return KERN_RESOURCE_SHORTAGE;
-
-#if	MACH_DEBUG
-		stack_init(stack);
-#endif	/* MACH_DEBUG */
-	}
+	if (! stack_alloc_try (thread, resume))
+		return KERN_RESOURCE_SHORTAGE;
 
-	stack_attach(thread, stack, resume);
 	return KERN_SUCCESS;
 }
 
@@ -228,17 +184,8 @@ void stack_free(
 
 	stack = stack_detach(thread);
 
-	if (stack != thread->stack_privilege) {
-		stack_lock();
-		stack_next(stack) = stack_free_list;
-		stack_free_list = stack;
-		stack_free_count += 1;
-#if	MACH_COUNTERS
-		if (stack_free_count > c_stack_alloc_max)
-			c_stack_alloc_max = stack_free_count;
-#endif	/* MACH_COUNTERS */
-		stack_unlock();
-	}
+	if (stack != thread->stack_privilege)
+		kmem_cache_free (&stack_cache, stack);
 }
 
 /*
@@ -250,28 +197,6 @@ void stack_free(
 
 void stack_collect(void)
 {
-	vm_offset_t stack;
-	spl_t s;
-
-	s = splsched();
-	stack_lock();
-	while (stack_free_count > stack_free_limit) {
-		stack = stack_free_list;
-		stack_free_list = stack_next(stack);
-		stack_free_count--;
-		stack_unlock();
-		(void) splx(s);
-
-#if	MACH_DEBUG
-		stack_finalize(stack);
-#endif	/* MACH_DEBUG */
-		kmem_cache_free(&stack_cache, stack);
-
-		s = splsched();
-		stack_lock();
-	}
-	stack_unlock();
-	(void) splx(s);
 }
 #endif	/* MACHINE_STACK */
 
@@ -2382,46 +2307,6 @@ void stack_finalize(
 	}
 }
 
-#ifndef	MACHINE_STACK
-/*
- *	stack_statistics:
- *
- *	Return statistics on cached kernel stacks.
- *	*maxusagep must be initialized by the caller.
- */
-
-void stack_statistics(
-	natural_t *totalp,
-	vm_size_t *maxusagep)
-{
-	spl_t	s;
-
-	s = splsched();
-	stack_lock();
-	if (stack_check_usage) {
-		vm_offset_t stack;
-
-		/*
-		 *	This is pretty expensive to do at splsched,
-		 *	but it only happens when someone makes
-		 *	a debugging call, so it should be OK.
-		 */
-
-		for (stack = stack_free_list; stack != 0;
-		     stack = stack_next(stack)) {
-			vm_size_t usage = stack_usage(stack);
-
-			if (usage > *maxusagep)
-				*maxusagep = usage;
-		}
-	}
-
-	*totalp = stack_free_count;
-	stack_unlock();
-	(void) splx(s);
-}
-#endif	/* MACHINE_STACK */
-
 kern_return_t host_stack_usage(
 	host_t		host,
 	vm_size_t	*reservedp,
@@ -2441,8 +2326,6 @@ kern_return_t host_stack_usage(
 	maxusage = stack_max_usage;
 	simple_unlock(&stack_usage_lock);
 
-	stack_statistics(&total, &maxusage);
-
 	*reservedp = 0;
 	*totalp = total;
 	*spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
-- 
2.1.4
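
For reference, the post-patch control flow in miniature:
stack_alloc_try goes straight to the slab cache and falls back to
the thread's privileged stack, and stack_alloc becomes a thin
wrapper around it. The kmem_cache type and functions below are
mocked stand-ins, not gnumach's kern/slab.h interface, which may
differ in detail; this only mirrors the shape of the new code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Mocked stand-ins for gnumach's types and slab interface. */
typedef uintptr_t vm_offset_t;
struct kmem_cache { size_t obj_size; };
struct thread { vm_offset_t stack_privilege; vm_offset_t kernel_stack; };

static struct kmem_cache stack_cache = { 4096 };

static vm_offset_t kmem_cache_alloc(struct kmem_cache *cache)
{
	return (vm_offset_t)aligned_alloc(cache->obj_size, cache->obj_size);
}

static void kmem_cache_free(struct kmem_cache *cache, vm_offset_t obj)
{
	(void)cache;
	free((void *)obj);
}

/* Try the cache first, then the privileged stack: the same order
   the patched stack_alloc_try uses. */
static bool stack_alloc_try(struct thread *thread)
{
	vm_offset_t stack = kmem_cache_alloc(&stack_cache);
	if (stack == 0)
		stack = thread->stack_privilege;
	if (stack == 0)
		return false;
	thread->kernel_stack = stack;	/* stands in for stack_attach() */
	return true;
}

/* stack_alloc reduces to a wrapper returning a shortage error. */
static int stack_alloc(struct thread *thread)
{
	return stack_alloc_try(thread) ? 0 : -1;
}

int main(void)
{
	struct thread th = { 0, 0 };
	if (stack_alloc(&th) != 0)
		return 1;
	/* stack_free now hands the stack straight back to the cache,
	   unless it is the thread's privileged stack. */
	if (th.kernel_stack != th.stack_privilege)
		kmem_cache_free(&stack_cache, th.kernel_stack);
	return 0;
}

With the list gone there is nothing for stack_collect to drain,
which is why the patch leaves it as an empty function.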