path: root/libthreads/stack.c
blob: 2cb50d916d4a22f5e17209e089809fa066e83822
/* 
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 * 
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 * 
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 * 
 * Carnegie Mellon requests users of this software to return to
 * 
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 * 
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log:	stack.c,v $
 * Revision 2.13  92/01/14  16:48:54  rpd
 * 	Fixed addr_range_check to deallocate the object port from vm_region.
 * 	[92/01/14            rpd]
 * 
 * Revision 2.12  92/01/03  20:37:10  dbg
 * 	Export cthread_stack_size, and use it if non-zero instead of
 * 	probing the stack.  Fix error in deallocating unused initial
 * 	stack (STACK_GROWTH_UP case).
 * 	[91/08/28            dbg]
 * 
 * Revision 2.11  91/07/31  18:39:34  dbg
 * 	Fix some bad stack references (stack direction).
 * 	[91/07/30  17:36:50  dbg]
 * 
 * Revision 2.10  91/05/14  17:58:49  mrt
 * 	Correcting copyright
 * 
 * Revision 2.9  91/02/14  14:21:08  mrt
 * 	Added new Mach copyright
 * 	[91/02/13  12:41:35  mrt]
 * 
 * Revision 2.8  90/11/05  18:10:46  rpd
 * 	Added cproc_stack_base.  Add stack_fork_child().
 * 	[90/11/01            rwd]
 * 
 * Revision 2.7  90/11/05  14:37:51  rpd
 * 	Fixed addr_range_check for new vm_region semantics.
 * 	[90/11/02            rpd]
 * 
 * Revision 2.6  90/10/12  13:07:34  rpd
 * 	Deal with positively growing stacks.
 * 	[90/10/10            rwd]
 * 	Deal with initial user stacks that are not perfectly aligned.
 * 	[90/09/26  11:51:46  rwd]
 * 
 * 	Leave extra stack page around in case it is needed before we
 *	switch stacks.
 * 	[90/09/25            rwd]
 * 
 * Revision 2.5  90/08/07  14:31:46  rpd
 * 	Removed RCS keyword nonsense.
 * 
 * Revision 2.4  90/06/02  15:14:18  rpd
 * 	Moved cthread_sp to machine-dependent files.
 * 	[90/04/24            rpd]
 * 	Converted to new IPC.
 * 	[90/03/20  20:56:35  rpd]
 * 
 * Revision 2.3  90/01/19  14:37:34  rwd
 * 	Move self pointer to top of stack
 * 	[89/12/12            rwd]
 * 
 * Revision 2.2  89/12/08  19:49:52  rwd
 * 	Back out change from af.
 * 	[89/12/08            rwd]
 * 
 * Revision 2.1.1.3  89/12/06  12:54:17  rwd
 * 	Gap fix from af
 * 	[89/12/06            rwd]
 * 
 * Revision 2.1.1.2  89/11/21  15:01:40  rwd
 * 	Add RED_ZONE ifdef.
 * 	[89/11/20            rwd]
 * 
 * Revision 2.1.1.1  89/10/24  13:00:44  rwd
 * 	Remove conditionals.
 * 	[89/10/23            rwd]
 * 
 * Revision 2.1  89/08/03  17:10:05  rwd
 * Created.
 * 
 * 18-Jan-89  David Golub (dbg) at Carnegie-Mellon University
 *	Altered for stand-alone use:
 *	use vm_region to probe for the bottom of the initial thread's
 *	stack.
 *
 *
 * 01-Dec-87  Eric Cooper (ecc) at Carnegie Mellon University
 *	Changed cthread stack allocation to use aligned stacks
 *	and store self pointer at base of stack.
 *	Added inline expansion for cthread_sp() function.
 */
/*
 * 	File: 	stack.c
 *	Author:	Eric Cooper, Carnegie Mellon University
 *	Date:	Dec, 1987
 *
 * 	C Thread stack allocation.
 *
 */

#include <cthreads.h>
#include "cthread_internals.h"
#include <hurd/threadvar.h>

#define	BYTES_TO_PAGES(b)	(((b) + vm_page_size - 1) / vm_page_size)

int cthread_stack_mask;
vm_size_t cthread_stack_size;
private vm_address_t next_stack_base;

vm_offset_t cproc_stack_base();	/* forward */

/*
 * Set up a stack segment for a thread.
 * Segment has a red zone (invalid page)
 * for early detection of stack overflow.
 * The cproc_self pointer is stored at the top.
 *
 *	--------- (high address)
 *	| self	|
 *	|  ...	|
 *	|	|
 *	| stack	|
 *	|	|
 *	|  ...	|
 *	|	|
 *	---------
 *	|	|
 *	|invalid|
 *	|	|
 *	--------- (stack base)
 *	--------- (low address)
 *
 * or the reverse, if the stack grows up.
 */

private void
setup_stack(p, base)
	register cproc_t p;
	register vm_address_t base;
{
	register kern_return_t r;

	p->stack_base = base;
	/*
	 * Stack size covers the whole segment; the self
	 * pointer is stored within it (see layout above).
	 */
	p->stack_size = cthread_stack_size;
	/*
	 * Protect red zone.
	 */
#ifdef RED_ZONE
	MACH_CALL(vm_protect(mach_task_self(), base + vm_page_size, vm_page_size, FALSE, VM_PROT_NONE), r);
#endif /* RED_ZONE */
	/*
	 * Store self pointer.
	 */
	*(cproc_t *)&ur_cthread_ptr(base) = p;
}
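
/*
 * Why the segment must be size-aligned: the running thread recovers
 * its cproc by masking its own stack pointer with cthread_stack_mask
 * (set up in stack_init below).  The real definition lives in the
 * cthreads headers as ur_cthread_ptr(); the macro below is only a
 * rough sketch of the downward-growth case, using names from this
 * file, and is not part of the library:
 *
 *	#define EXAMPLE_SELF(sp) \
 *		(* (cproc_t *) (((sp) | cthread_stack_mask) + 1 \
 *				- sizeof(ur_cthread_t *)))
 *
 * i.e. round the stack pointer up to the top of its power-of-2
 * segment and read back the pointer stored there by setup_stack().
 */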

vm_offset_t
addr_range_check(start_addr, end_addr, desired_protection)
	vm_offset_t	start_addr, end_addr;
	vm_prot_t	desired_protection;
{
	register vm_offset_t	addr;

	addr = start_addr;
	while (addr < end_addr) {
	    vm_offset_t		r_addr;
	    vm_size_t		r_size;
	    vm_prot_t		r_protection,
				r_max_protection;
	    vm_inherit_t	r_inheritance;
	    boolean_t		r_is_shared;
	    memory_object_name_t	r_object_name;
	    vm_offset_t		r_offset;
	    kern_return_t	kr;

	    r_addr = addr;
	    kr = vm_region(mach_task_self(), &r_addr, &r_size,
			   &r_protection, &r_max_protection, &r_inheritance,
			   &r_is_shared, &r_object_name, &r_offset);
	    if ((kr == KERN_SUCCESS) && MACH_PORT_VALID(r_object_name))
		(void) mach_port_deallocate(mach_task_self(), r_object_name);

	    if ((kr != KERN_SUCCESS) ||
		(r_addr > addr) ||
		((r_protection & desired_protection) != desired_protection))
		return (0);
	    addr = r_addr + r_size;
	}
	return (addr);
}
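
/*
 * Usage sketch (hypothetical, not part of the library): check whether
 * the page under the current stack pointer is mapped read/write.
 *
 *	vm_offset_t page = cthread_sp() & ~(vm_page_size - 1);
 *	boolean_t writable =
 *	    addr_range_check(page, page + vm_page_size,
 *			     VM_PROT_READ|VM_PROT_WRITE) != 0;
 *
 * A zero result means part of the range is unmapped or lacks the
 * desired protection; a non-zero result is the end of the last region
 * examined, which may lie beyond end_addr.  probe_stack below relies
 * on that to find the true top of the initial stack.
 */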

/*
 * Probe for bottom and top of stack.
 * Assume:
 * 1. stack grows DOWN
 * 2. There is an unallocated region below the stack.
 */
void
probe_stack(stack_bottom, stack_top)
	vm_offset_t	*stack_bottom;
	vm_offset_t	*stack_top;
{
	/*
	 * Since vm_region returns the region starting at
	 * or ABOVE the given address, we cannot use it
	 * directly to search downwards.  However, we
	 * also want a size that is the closest power of
	 * 2 to the stack size (so we can mask off the stack
	 * address and get the stack base).  So we probe
	 * in increasing powers of 2 until we find a gap
	 * in the stack.
	 */
	vm_offset_t	start_addr, end_addr;
	vm_offset_t	last_start_addr, last_end_addr;
	vm_size_t	stack_size;

	/*
	 * Start with a page
	 */
	start_addr = cthread_sp() & ~(vm_page_size - 1);
	end_addr   = start_addr + vm_page_size;

	stack_size = vm_page_size;

	/*
	 * Increase the tentative stack size, by doubling each
	 * time, until we have exceeded the stack (some of the
	 * range is not valid).
	 */
	do {
	    /*
	     * Save last addresses
	     */
	    last_start_addr = start_addr;
	    last_end_addr   = end_addr;

	    /*
	     * Double the stack size
	     */
	    stack_size <<= 1;
	    start_addr = end_addr - stack_size;

	    /*
	     * Check that the entire range exists and is writable
	     */
	} while (end_addr = (addr_range_check(start_addr,
				  end_addr,
				  VM_PROT_READ|VM_PROT_WRITE)));
	/*
	 * Back off to previous power of 2.
	 */
	*stack_bottom = last_start_addr;
	*stack_top = last_end_addr;
}
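
/*
 * Worked example (hypothetical addresses, 4K pages): suppose the
 * initial stack is the single region [0x01800000, 0x01810000) and
 * cthread_sp() returns 0x0180ff00.  The probe starts with the one
 * page [0x0180f000, 0x01810000) and keeps doubling:
 *
 *	size 0x2000:  [0x0180e000, 0x01810000)  valid
 *	size 0x4000:  [0x0180c000, 0x01810000)  valid
 *	size 0x8000:  [0x01808000, 0x01810000)  valid
 *	size 0x10000: [0x01800000, 0x01810000)  valid
 *	size 0x20000: [0x017f0000, 0x01810000)  invalid (gap below)
 *
 * so it backs off and reports bottom 0x01800000 and top 0x01810000,
 * giving stack_init a 64K power-of-2 stack size.
 */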

/* For GNU: */
unsigned long int __hurd_threadvar_stack_mask;
unsigned long int __hurd_threadvar_stack_offset;

vm_offset_t
stack_init(p)
	cproc_t p;
{
	vm_offset_t	stack_bottom,
			stack_top,
			start;
	vm_size_t	size;
	kern_return_t	r;

	void alloc_stack();

	/*
	 * Probe for bottom and top of stack, as a power-of-2 size.
	 */
	probe_stack(&stack_bottom, &stack_top);

	/*
	 * Use the stack size found for the Cthread stack size,
	 * if not already specified.
	 */
	if (cthread_stack_size == 0)
	    cthread_stack_size = stack_top - stack_bottom;
#ifdef	STACK_GROWTH_UP
	cthread_stack_mask = ~(cthread_stack_size - 1);
#else	/* STACK_GROWTH_UP */
	cthread_stack_mask = cthread_stack_size - 1;
#endif	/* STACK_GROWTH_UP */

	/* Set up the variables so GNU can find its per-thread variables.  */
	__hurd_threadvar_stack_mask = cthread_stack_mask;

	/*
	 * Guess at first available region for stack.
	 */
	next_stack_base = 0;

	/*
	 * Set up stack for main thread.
	 */
	alloc_stack(p);

	/*
	 * Delete rest of old stack.
	 */

#ifdef	STACK_GROWTH_UP
	start = (cthread_sp() | (vm_page_size - 1)) + 1 + vm_page_size;
	size = stack_top - start;
#else	/* STACK_GROWTH_UP */
	start = stack_bottom;
	size = (cthread_sp() & ~(vm_page_size - 1)) - stack_bottom -
	       vm_page_size;
#endif	/* STACK_GROWTH_UP */
	MACH_CALL(vm_deallocate(mach_task_self(),start,size),r);

	/* The GNU per-thread variables will be stored just after the
	   cthread-self pointer at the base of the stack.  */
	__hurd_threadvar_stack_offset = sizeof (ur_cthread_t *);

	/*
	 * Return new stack; it gets passed back to the caller
	 * of cthread_init who must switch to it.
	 */
	return cproc_stack_base(p,
				sizeof(ur_cthread_t *) +
				/* Account for GNU per-thread variables.  */
				__hurd_threadvar_max * sizeof (long int));
}

/*
 * Allocate a stack segment for a thread.
 * Stacks are never deallocated.
 *
 * The variable next_stack_base is used to align stacks.
 * It may be updated by several threads in parallel,
 * but mutual exclusion is unnecessary: at worst,
 * the vm_allocate will fail and the thread will try again.
 */

void
alloc_stack(p)
	cproc_t p;
{
	vm_address_t base;

	for (base = next_stack_base;
	     vm_allocate(mach_task_self(), &base, cthread_stack_size, FALSE) != KERN_SUCCESS;
	     base += cthread_stack_size)
		;
	next_stack_base = base + cthread_stack_size;
	setup_stack(p, base);
}

vm_offset_t
cproc_stack_base(cproc, offset)
	register cproc_t cproc;
	register int offset;
{
#ifdef	STACK_GROWTH_UP
	return (cproc->stack_base + offset);
#else	/* STACK_GROWTH_UP */
	return (cproc->stack_base + cproc->stack_size - offset);
#endif	/* STACK_GROWTH_UP */
}
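
/*
 * Example (hypothetical values): with a downward-growing stack whose
 * stack_base is 0x01800000 and stack_size 0x10000, and with
 * __hurd_threadvar_max == 2 and 4-byte pointers and longs, stack_init
 * above passes offset = 4 + 2*4 = 12, so cproc_stack_base returns
 * 0x0180fff4: the main thread resumes just below the 12 bytes reserved
 * at that end of the segment for the cthread-self pointer and the two
 * GNU per-thread variable slots.
 */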

void stack_fork_child()
/*
 * Called in the child after a fork().  Resets stack data structures to
 * coincide with the reality that we now have a single cproc and cthread.
 */
{
    next_stack_base = 0;
}