Bug Summary

File: obj-scan-build/../kern/thread.c
Location: line 1674, column 2
Description: Function call argument is an uninitialized value
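
How the analyzer reaches this report: in kernel_thread() the local variable 'thread' is declared without an initial value (step 1) and the return value of thread_create(task, &thread) is discarded (step 2). Inside thread_create(), when the parent task is no longer active (step 13), the newly created thread is terminated and deallocated and the function returns KERN_FAILURE before ever storing into *child_thread (steps 14-28). Back in kernel_thread(), thread_deallocate(thread) at line 1674 then reads the still-uninitialized pointer (step 30).

A minimal sketch of one possible guard, assuming the rest of kernel_thread() stays as in the listing below; this illustrates the report and is not the upstream fix:

	thread_t kernel_thread(
		task_t		task,
		continuation_t	start,
		void *		arg)
	{
		thread_t	thread = THREAD_NULL;	/* start from a known value */

		/* Do not ignore the result: on KERN_FAILURE or
		   KERN_RESOURCE_SHORTAGE, *child_thread is never written. */
		if (thread_create(task, &thread) != KERN_SUCCESS)
			return THREAD_NULL;

		/* release "extra" ref that thread_create gave us */
		thread_deallocate(thread);
		thread_start(thread, start);
		/* ... remainder unchanged from the annotated source ... */
	}

Note that returning THREAD_NULL changes the function's contract for its callers; in kernel context a panic() at that point may be the more appropriate reaction. The sketch only shows where the missing check belongs.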

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1994-1987 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: kern/thread.c
28 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
29 * Date: 1986
30 *
31 * Thread management primitives implementation.
32 */
33
34#include <kern/printf.h>
35#include <mach/std_types.h>
36#include <mach/policy.h>
37#include <mach/thread_info.h>
38#include <mach/thread_special_ports.h>
39#include <mach/thread_status.h>
40#include <mach/time_value.h>
41#include <machine/vm_param.h>
42#include <kern/ast.h>
43#include <kern/counters.h>
44#include <kern/debug.h>
45#include <kern/eventcount.h>
46#include <kern/ipc_mig.h>
47#include <kern/ipc_tt.h>
48#include <kern/processor.h>
49#include <kern/queue.h>
50#include <kern/sched.h>
51#include <kern/sched_prim.h>
52#include <kern/syscall_subr.h>
53#include <kern/thread.h>
54#include <kern/thread_swap.h>
55#include <kern/host.h>
56#include <kern/kalloc.h>
57#include <kern/slab.h>
58#include <kern/mach_clock.h>
59#include <vm/vm_kern.h>
60#include <vm/vm_user.h>
61#include <ipc/ipc_kmsg.h>
62#include <ipc/ipc_port.h>
63#include <ipc/mach_msg.h>
64#include <ipc/mach_port.h>
65#include <machine/machspl.h> /* for splsched */
66#include <machine/pcb.h>
67#include <machine/thread.h> /* for MACHINE_STACK */
68
69thread_t active_threads[NCPUS1];
70vm_offset_t active_stacks[NCPUS1];
71
72struct kmem_cache thread_cache;
73
74queue_head_t reaper_queue;
75decl_simple_lock_data(, reaper_lock)struct simple_lock_data_empty reaper_lock;
76
77/* private */
78struct thread thread_template;
79
80#if MACH_DEBUG1
81#define STACK_MARKER 0xdeadbeefU
82boolean_t stack_check_usage = FALSE((boolean_t) 0);
83decl_simple_lock_data(, stack_usage_lock)struct simple_lock_data_empty stack_usage_lock;
84vm_size_t stack_max_usage = 0;
85#endif /* MACH_DEBUG */
86
87/*
88 * Machine-dependent code must define:
89 * pcb_init
90 * pcb_terminate
91 * pcb_collect
92 *
93 * The thread->pcb field is reserved for machine-dependent code.
94 */
95
96#ifdef MACHINE_STACK
97/*
98 * Machine-dependent code must define:
99 * stack_alloc_try
100 * stack_alloc
101 * stack_free
102 * stack_handoff
103 * stack_collect
104 * and if MACH_DEBUG:
105 * stack_statistics
106 */
107#else /* MACHINE_STACK */
108/*
109 * We allocate stacks from generic kernel VM.
110 * Machine-dependent code must define:
111 * stack_attach
112 * stack_detach
113 * stack_handoff
114 *
115 * The stack_free_list can only be accessed at splsched,
116 * because stack_alloc_try/thread_invoke operate at splsched.
117 */
118
119decl_simple_lock_data(, stack_lock_data)struct simple_lock_data_empty stack_lock_data;/* splsched only */
120#define stack_lock() simple_lock(&stack_lock_data)
121#define stack_unlock() simple_unlock(&stack_lock_data)
122
123vm_offset_t stack_free_list; /* splsched only */
124unsigned int stack_free_count = 0; /* splsched only */
125unsigned int stack_free_limit = 1; /* patchable */
126
127unsigned int stack_alloc_hits = 0; /* debugging */
128unsigned int stack_alloc_misses = 0; /* debugging */
129unsigned int stack_alloc_max = 0; /* debugging */
130
131/*
132 * The next field is at the base of the stack,
133 * so the low end is left unsullied.
134 */
135
136#define stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1)) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE(1*4096)) - 1))
137
138/*
139 * stack_alloc_try:
140 *
141 * Non-blocking attempt to allocate a kernel stack.
142 * Called at splsched with the thread locked.
143 */
144
145boolean_t stack_alloc_try(
146 thread_t thread,
147 void (*resume)(thread_t))
148{
149 vm_offset_t stack;
150
151 stack_lock();
152 stack = stack_free_list;
153 if (stack != 0) {
154 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
155 stack_free_count--;
156 } else {
157 stack = thread->stack_privilege;
158 }
159 stack_unlock()((void)(&stack_lock_data));
160
161 if (stack != 0) {
162 stack_attach(thread, stack, resume);
163 stack_alloc_hits++;
164 return TRUE((boolean_t) 1);
165 } else {
166 stack_alloc_misses++;
167 return FALSE((boolean_t) 0);
168 }
169}
170
171/*
172 * stack_alloc:
173 *
174 * Allocate a kernel stack for a thread.
175 * May block.
176 */
177
178void stack_alloc(
179 thread_t thread,
180 void (*resume)(thread_t))
181{
182 vm_offset_t stack;
183 spl_t s;
184
185 /*
186 * We first try the free list. It is probably empty,
187 * or stack_alloc_try would have succeeded, but possibly
188 * a stack was freed before the swapin thread got to us.
189 */
190
191 s = splsched();
192 stack_lock();
193 stack = stack_free_list;
194 if (stack != 0) {
195 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
196 stack_free_count--;
197 }
198 stack_unlock()((void)(&stack_lock_data));
199 (void) splx(s);
200
201 if (stack == 0) {
202 /*
203 * Kernel stacks should be naturally aligned,
204 * so that it is easy to find the starting/ending
205 * addresses of a stack given an address in the middle.
206 */
207
208 if (kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE(1*4096))
209 != KERN_SUCCESS0)
210 panic("stack_alloc");
211
212#if MACH_DEBUG1
213 stack_init(stack);
214#endif /* MACH_DEBUG */
215 }
216
217 stack_attach(thread, stack, resume);
218}
219
220/*
221 * stack_free:
222 *
223 * Free a thread's kernel stack.
224 * Called at splsched with the thread locked.
225 */
226
227void stack_free(
228 thread_t thread)
229{
230 vm_offset_t stack;
231
232 stack = stack_detach(thread);
233
234 if (stack != thread->stack_privilege) {
235 stack_lock();
236 stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1)) = stack_free_list;
237 stack_free_list = stack;
238 if (++stack_free_count > stack_alloc_max)
239 stack_alloc_max = stack_free_count;
240 stack_unlock()((void)(&stack_lock_data));
241 }
242}
243
244/*
245 * stack_collect:
246 *
247 * Free excess kernel stacks.
248 * May block.
249 */
250
251void stack_collect(void)
252{
253 vm_offset_t stack;
254 spl_t s;
255
256 s = splsched();
257 stack_lock();
258 while (stack_free_count > stack_free_limit) {
259 stack = stack_free_list;
260 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
261 stack_free_count--;
262 stack_unlock()((void)(&stack_lock_data));
263 (void) splx(s);
264
265#if MACH_DEBUG1
266 stack_finalize(stack);
267#endif /* MACH_DEBUG */
268 kmem_free(kmem_map, stack, KERNEL_STACK_SIZE(1*4096));
269
270 s = splsched();
271 stack_lock();
272 }
273 stack_unlock()((void)(&stack_lock_data));
274 (void) splx(s);
275}
276#endif /* MACHINE_STACK */
277
278/*
279 * stack_privilege:
280 *
281 * stack_alloc_try on this thread must always succeed.
282 */
283
284void stack_privilege(
285 thread_t thread)
286{
287 /*
288 * This implementation only works for the current thread.
289 */
290
291 if (thread != current_thread()(active_threads[(0)]))
292 panic("stack_privilege");
293
294 if (thread->stack_privilege == 0)
295 thread->stack_privilege = current_stack()(active_stacks[(0)]);
296}
297
298void thread_init(void)
299{
300 kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
301 NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
302
303 /*
304 * Fill in a template thread for fast initialization.
305 * [Fields that must be (or are typically) reset at
306 * time of creation are so noted.]
307 */
308
309 /* thread_template.links (none) */
310 thread_template.runq = RUN_QUEUE_NULL((run_queue_t) 0);
311
312 /* thread_template.task (later) */
313 /* thread_template.thread_list (later) */
314 /* thread_template.pset_threads (later) */
315
316 /* thread_template.lock (later) */
317 /* one ref for being alive; one for the guy who creates the thread */
318 thread_template.ref_count = 2;
319
320 thread_template.pcb = (pcb_t) 0; /* (reset) */
321 thread_template.kernel_stack = (vm_offset_t) 0;
322 thread_template.stack_privilege = (vm_offset_t) 0;
323
324 thread_template.wait_event = 0;
325 /* thread_template.suspend_count (later) */
326 thread_template.wait_result = KERN_SUCCESS0;
327 thread_template.wake_active = FALSE((boolean_t) 0);
328 thread_template.state = TH_SUSP0x02 | TH_SWAPPED0x0100;
329 thread_template.swap_func = thread_bootstrap_return;
330
331/* thread_template.priority (later) */
332 thread_template.max_priority = BASEPRI_USER25;
333/* thread_template.sched_pri (later - compute_priority) */
334#if MACH_FIXPRI1
335 thread_template.sched_data = 0;
336 thread_template.policy = POLICY_TIMESHARE1;
337#endif /* MACH_FIXPRI */
338 thread_template.depress_priority = -1;
339 thread_template.cpu_usage = 0;
340 thread_template.sched_usage = 0;
341 /* thread_template.sched_stamp (later) */
342
343 thread_template.recover = (vm_offset_t) 0;
344 thread_template.vm_privilege = FALSE((boolean_t) 0);
345
346 thread_template.user_stop_count = 1;
347
348 /* thread_template.<IPC structures> (later) */
349
350 timer_init(&(thread_template.user_timer));
351 timer_init(&(thread_template.system_timer));
352 thread_template.user_timer_save.low = 0;
353 thread_template.user_timer_save.high = 0;
354 thread_template.system_timer_save.low = 0;
355 thread_template.system_timer_save.high = 0;
356 thread_template.cpu_delta = 0;
357 thread_template.sched_delta = 0;
358
359 thread_template.active = FALSE((boolean_t) 0); /* reset */
360 thread_template.ast = AST_ZILCH0x0;
361
362 /* thread_template.processor_set (later) */
363 thread_template.bound_processor = PROCESSOR_NULL((processor_t) 0);
364#if MACH_HOST0
365 thread_template.may_assign = TRUE((boolean_t) 1);
366 thread_template.assign_active = FALSE((boolean_t) 0);
367#endif /* MACH_HOST */
368
369#if NCPUS1 > 1
370 /* thread_template.last_processor (later) */
371#endif /* NCPUS > 1 */
372
373 /*
374 * Initialize other data structures used in
375 * this module.
376 */
377
378 queue_init(&reaper_queue);
379 simple_lock_init(&reaper_lock);
380
381#ifndef MACHINE_STACK
382 simple_lock_init(&stack_lock_data);
383#endif /* MACHINE_STACK */
384
385#if MACH_DEBUG1
386 simple_lock_init(&stack_usage_lock);
387#endif /* MACH_DEBUG */
388
389 /*
390 * Initialize any machine-dependent
391 * per-thread structures necessary.
392 */
393
394 pcb_module_init();
395}
396
397kern_return_t thread_create(
398 task_t parent_task,
399 thread_t *child_thread) /* OUT */
400{
401 thread_t new_thread;
402 processor_set_t pset;
403
404 if (parent_task == TASK_NULL((task_t) 0))
3
Assuming 'parent_task' is not equal to null
4
Taking false branch
405 return KERN_INVALID_ARGUMENT4;
406
407 /*
408 * Allocate a thread and initialize static fields
409 */
410
411 new_thread = (thread_t) kmem_cache_alloc(&thread_cache);
412
413 if (new_thread == THREAD_NULL((thread_t) 0))
5
Assuming 'new_thread' is not equal to null
6
Taking false branch
414 return KERN_RESOURCE_SHORTAGE6;
415
416 *new_thread = thread_template;
417
418 record_time_stamp (&new_thread->creation_time);
419
420 /*
421 * Initialize runtime-dependent fields
422 */
423
424 new_thread->task = parent_task;
425 simple_lock_init(&new_thread->lock);
426 new_thread->sched_stamp = sched_tick;
427 thread_timeout_setup(new_thread);
428
429 /*
430 * Create a pcb. The kernel stack is created later,
431 * when the thread is swapped-in.
432 */
433 pcb_init(new_thread);
434
435 ipc_thread_init(new_thread);
436
437 /*
438 * Find the processor set for the parent task.
439 */
440 task_lock(parent_task);
441 pset = parent_task->processor_set;
442 pset_reference(pset);
443 task_unlock(parent_task)((void)(&(parent_task)->lock));
444
445 /*
446 * Lock both the processor set and the task,
447 * so that the thread can be added to both
448 * simultaneously. Processor set must be
449 * locked first.
450 */
451
452 Restart:
453 pset_lock(pset);
454 task_lock(parent_task);
455
456 /*
457 * If the task has changed processor sets,
458 * catch up (involves lots of lock juggling).
459 */
460 {
461 processor_set_t cur_pset;
462
463 cur_pset = parent_task->processor_set;
464 if (!cur_pset->active)
7
Taking false branch
465 cur_pset = &default_pset;
466
467 if (cur_pset != pset) {
8
Taking false branch
468 pset_reference(cur_pset);
469 task_unlock(parent_task)((void)(&(parent_task)->lock));
470 pset_unlock(pset)((void)(&(pset)->lock));
471 pset_deallocate(pset);
472 pset = cur_pset;
473 goto Restart;
474 }
475 }
476
477 /*
478 * Set the thread`s priority from the pset and task.
479 */
480
481 new_thread->priority = parent_task->priority;
482 if (pset->max_priority > new_thread->max_priority)
9
Taking false branch
483 new_thread->max_priority = pset->max_priority;
484 if (new_thread->max_priority > new_thread->priority)
10
Taking false branch
485 new_thread->priority = new_thread->max_priority;
486 /*
487 * Don't need to lock thread here because it can't
488 * possibly execute and no one else knows about it.
489 */
490 compute_priority(new_thread, TRUE((boolean_t) 1));
491
492 /*
493 * Thread is suspended if the task is. Add 1 to
494 * suspend count since thread is created in suspended
495 * state.
496 */
497 new_thread->suspend_count = parent_task->suspend_count + 1;
498
499 /*
500 * Add the thread to the processor set.
501 * If the pset is empty, suspend the thread again.
502 */
503
504 pset_add_thread(pset, new_thread);
505 if (pset->empty)
11
Taking false branch
506 new_thread->suspend_count++;
507
508#if HW_FOOTPRINT0
509 /*
510 * Need to set last_processor, idle processor would be best, but
511 * that requires extra locking nonsense. Go for tail of
512 * processors queue to avoid master.
513 */
514 if (!pset->empty) {
515 new_thread->last_processor =
516 (processor_t)queue_first(&pset->processors)((&pset->processors)->next);
517 }
518 else {
519 /*
520 * Thread created in empty processor set. Pick
521 * master processor as an acceptable legal value.
522 */
523 new_thread->last_processor = master_processor;
524 }
525#else /* HW_FOOTPRINT */
526 /*
527 * Don't need to initialize because the context switch
528 * code will set it before it can be used.
529 */
530#endif /* HW_FOOTPRINT */
531
532#if MACH_PCSAMPLE1
533 new_thread->pc_sample.seqno = 0;
534 new_thread->pc_sample.sampletypes = 0;
535#endif /* MACH_PCSAMPLE */
536
537 new_thread->pc_sample.buffer = 0;
538 /*
539 * Add the thread to the task`s list of threads.
540 * The new thread holds another reference to the task.
541 */
542
543 parent_task->ref_count++;
544
545 parent_task->thread_count++;
546 queue_enter(&parent_task->thread_list, new_thread, thread_t,
12
Within the expansion of the macro 'queue_enter':
547 thread_list);
548
549 /*
550 * Finally, mark the thread active.
551 */
552
553 new_thread->active = TRUE((boolean_t) 1);
554
555 if (!parent_task->active) {
13
Taking true branch
556 task_unlock(parent_task)((void)(&(parent_task)->lock));
557 pset_unlock(pset)((void)(&(pset)->lock));
558 (void) thread_terminate(new_thread);
14
Calling 'thread_terminate'
24
Returning from 'thread_terminate'
559 /* release ref we would have given our caller */
560 thread_deallocate(new_thread);
25
Calling 'thread_deallocate'
28
Returning from 'thread_deallocate'
561 return KERN_FAILURE5;
562 }
563 task_unlock(parent_task)((void)(&(parent_task)->lock));
564 pset_unlock(pset)((void)(&(pset)->lock));
565
566 ipc_thread_enable(new_thread);
567
568 *child_thread = new_thread;
569 return KERN_SUCCESS0;
570}
571
572unsigned int thread_deallocate_stack = 0;
573
574void thread_deallocate(
575 thread_t thread)
576{
577 spl_t s;
578 task_t task;
579 processor_set_t pset;
580
581 time_value_t user_time, system_time;
582
583 if (thread == THREAD_NULL((thread_t) 0))
21
Taking false branch
26
Taking false branch
584 return;
585
586 /*
587 * First, check for new count > 0 (the common case).
588 * Only the thread needs to be locked.
589 */
590 s = splsched();
591 thread_lock(thread);
592 if (--thread->ref_count > 0) {
22
Taking true branch
27
Taking true branch
593 thread_unlock(thread)((void)(&(thread)->lock));
594 (void) splx(s);
595 return;
596 }
597
598 /*
599 * Count is zero. However, the task's and processor set's
600 * thread lists have implicit references to
601 * the thread, and may make new ones. Their locks also
602 * dominate the thread lock. To check for this, we
603 * temporarily restore the one thread reference, unlock
604 * the thread, and then lock the other structures in
605 * the proper order.
606 */
607 thread->ref_count = 1;
608 thread_unlock(thread)((void)(&(thread)->lock));
609 (void) splx(s);
610
611 pset = thread->processor_set;
612 pset_lock(pset);
613
614#if MACH_HOST0
615 /*
616 * The thread might have moved.
617 */
618 while (pset != thread->processor_set) {
619 pset_unlock(pset)((void)(&(pset)->lock));
620 pset = thread->processor_set;
621 pset_lock(pset);
622 }
623#endif /* MACH_HOST */
624
625 task = thread->task;
626 task_lock(task);
627
628 s = splsched();
629 thread_lock(thread);
630
631 if (--thread->ref_count > 0) {
632 /*
633 * Task or processor_set made extra reference.
634 */
635 thread_unlock(thread)((void)(&(thread)->lock));
636 (void) splx(s);
637 task_unlock(task)((void)(&(task)->lock));
638 pset_unlock(pset)((void)(&(pset)->lock));
639 return;
640 }
641
642 /*
643 * Thread has no references - we can remove it.
644 */
645
646 /*
647 * Remove pending timeouts.
648 */
649 reset_timeout_check(&thread->timer);
650
651 reset_timeout_check(&thread->depress_timer);
652 thread->depress_priority = -1;
653
654 /*
655 * Accumulate times for dead threads in task.
656 */
657 thread_read_times(thread, &user_time, &system_time);
658 time_value_add(&task->total_user_time, &user_time);
659 time_value_add(&task->total_system_time, &system_time);
660
661 /*
662 * Remove thread from task list and processor_set threads list.
663 */
664 task->thread_count--;
665 queue_remove(&task->thread_list, thread, thread_t, thread_list);
667 pset_remove_thread(pset, thread);
668
669 thread_unlock(thread)((void)(&(thread)->lock)); /* no more references - safe */
670 (void) splx(s);
671 task_unlock(task)((void)(&(task)->lock));
672 pset_unlock(pset)((void)(&(pset)->lock));
673 pset_deallocate(pset);
674
675 /*
676 * A couple of quick sanity checks
677 */
678
679 if (thread == current_thread()(active_threads[(0)])) {
680 panic("thread deallocating itself");
681 }
682 if ((thread->state & ~(TH_RUN0x04 | TH_HALTED0x10 | TH_SWAPPED0x0100)) != TH_SUSP0x02)
683 panic("unstopped thread destroyed!");
684
685 /*
686 * Deallocate the task reference, since we know the thread
687 * is not running.
688 */
689 task_deallocate(thread->task); /* may block */
690
691 /*
692 * Clean up any machine-dependent resources.
693 */
694 if ((thread->state & TH_SWAPPED0x0100) == 0) {
695 splsched();
696 stack_free(thread);
697 (void) splx(s);
698 thread_deallocate_stack++;
699 }
700 /*
701 * Rattle the event count machinery (gag)
702 */
703 evc_notify_abort(thread);
704
705 pcb_terminate(thread);
706 kmem_cache_free(&thread_cache, (vm_offset_t) thread);
707}
708
709void thread_reference(
710 thread_t thread)
711{
712 spl_t s;
713
714 if (thread == THREAD_NULL((thread_t) 0))
715 return;
716
717 s = splsched();
718 thread_lock(thread);
719 thread->ref_count++;
720 thread_unlock(thread)((void)(&(thread)->lock));
721 (void) splx(s);
722}
723
724/*
725 * thread_terminate:
726 *
727 * Permanently stop execution of the specified thread.
728 *
729 * A thread to be terminated must be allowed to clean up any state
730 * that it has before it exits. The thread is broken out of any
731 * wait condition that it is in, and signalled to exit. It then
732 * cleans up its state and calls thread_halt_self on its way out of
733 * the kernel. The caller waits for the thread to halt, terminates
734 * its IPC state, and then deallocates it.
735 *
736 * If the caller is the current thread, it must still exit the kernel
737 * to clean up any state (thread and port references, messages, etc).
738 * When it exits the kernel, it then terminates its IPC state and
739 * queues itself for the reaper thread, which will wait for the thread
740 * to stop and then deallocate it. (A thread cannot deallocate itself,
741 * since it needs a kernel stack to execute.)
742 */
743kern_return_t thread_terminate(
744 thread_t thread)
745{
746 thread_t cur_thread = current_thread()(active_threads[(0)]);
747 task_t cur_task;
748 spl_t s;
749
750 if (thread == THREAD_NULL((thread_t) 0))
15
Taking false branch
751 return KERN_INVALID_ARGUMENT4;
752
753 /*
754 * Break IPC control over the thread.
755 */
756 ipc_thread_disable(thread);
757
758 if (thread == cur_thread) {
16
Taking false branch
759
760 /*
761 * Current thread will queue itself for reaper when
762 * exiting kernel.
763 */
764 s = splsched();
765 thread_lock(thread);
766 if (thread->active) {
767 thread->active = FALSE((boolean_t) 0);
768 thread_ast_set(thread, AST_TERMINATE)(thread)->ast |= (0x2);
769 }
770 thread_unlock(thread)((void)(&(thread)->lock));
771 ast_on(cpu_number(), AST_TERMINATE)({ if ((need_ast[(0)] |= (0x2)) != 0x0) { ; } });
772 splx(s);
773 return KERN_SUCCESS0;
774 }
775
776 /*
777 * Lock both threads and the current task
778 * to check termination races and prevent deadlocks.
779 */
780 cur_task = current_task()((active_threads[(0)])->task);
781 task_lock(cur_task);
782 s = splsched();
783 if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
17
Taking false branch
784 thread_lock(thread);
785 thread_lock(cur_thread);
786 }
787 else {
788 thread_lock(cur_thread);
789 thread_lock(thread);
790 }
791
792 /*
793 * If the current thread is being terminated, help out.
794 */
795 if ((!cur_task->active) || (!cur_thread->active)) {
18
Taking false branch
796 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
797 thread_unlock(thread)((void)(&(thread)->lock));
798 (void) splx(s);
799 task_unlock(cur_task)((void)(&(cur_task)->lock));
800 thread_terminate(cur_thread);
801 return KERN_FAILURE5;
802 }
803
804 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
805 task_unlock(cur_task)((void)(&(cur_task)->lock));
806
807 /*
808 * Terminate victim thread.
809 */
810 if (!thread->active) {
19
Taking false branch
811 /*
812 * Someone else got there first.
813 */
814 thread_unlock(thread)((void)(&(thread)->lock));
815 (void) splx(s);
816 return KERN_FAILURE5;
817 }
818
819 thread->active = FALSE((boolean_t) 0);
820
821 thread_unlock(thread)((void)(&(thread)->lock));
822 (void) splx(s);
823
824#if MACH_HOST0
825 /*
826 * Reassign thread to default pset if needed.
827 */
828 thread_freeze(thread);
829 if (thread->processor_set != &default_pset) {
830 thread_doassign(thread, &default_pset, FALSE((boolean_t) 0));
831 }
832#endif /* MACH_HOST */
833
834 /*
835 * Halt the victim at the clean point.
836 */
837 (void) thread_halt(thread, TRUE((boolean_t) 1));
838#if MACH_HOST0
839 thread_unfreeze(thread);
840#endif /* MACH_HOST */
841 /*
842 * Shut down the victims IPC and deallocate its
843 * reference to itself.
844 */
845 ipc_thread_terminate(thread);
846 thread_deallocate(thread);
20
Calling 'thread_deallocate'
23
Returning from 'thread_deallocate'
847 return KERN_SUCCESS0;
848}
849
850kern_return_t thread_terminate_release(
851 thread_t thread,
852 task_t task,
853 mach_port_t thread_name,
854 mach_port_t reply_port,
855 vm_offset_t address,
856 vm_size_t size)
857{
858 if (task == NULL((void *) 0))
859 return KERN_INVALID_ARGUMENT4;
860
861 mach_port_deallocate(task->itk_space, thread_name);
862
863 if (reply_port != MACH_PORT_NULL((mach_port_t) 0))
864 mach_port_destroy(task->itk_space, reply_port);
865
866 if ((address != 0) || (size != 0))
867 vm_deallocate(task->map, address, size);
868
869 return thread_terminate(thread);
870}
871
872/*
873 * thread_force_terminate:
874 *
875 * Version of thread_terminate called by task_terminate. thread is
876 * not the current thread. task_terminate is the dominant operation,
877 * so we can force this thread to stop.
878 */
879void
880thread_force_terminate(
881 thread_t thread)
882{
883 boolean_t deallocate_here;
884 spl_t s;
885
886 ipc_thread_disable(thread);
887
888#if MACH_HOST0
889 /*
890 * Reassign thread to default pset if needed.
891 */
892 thread_freeze(thread);
893 if (thread->processor_set != &default_pset)
894 thread_doassign(thread, &default_pset, FALSE((boolean_t) 0));
895#endif /* MACH_HOST */
896
897 s = splsched();
898 thread_lock(thread);
899 deallocate_here = thread->active;
900 thread->active = FALSE((boolean_t) 0);
901 thread_unlock(thread)((void)(&(thread)->lock));
902 (void) splx(s);
903
904 (void) thread_halt(thread, TRUE((boolean_t) 1));
905 ipc_thread_terminate(thread);
906
907#if MACH_HOST0
908 thread_unfreeze(thread);
909#endif /* MACH_HOST */
910
911 if (deallocate_here)
912 thread_deallocate(thread);
913}
914
915
916/*
917 * Halt a thread at a clean point, leaving it suspended.
918 *
919 * must_halt indicates whether thread must halt.
920 *
921 */
922kern_return_t thread_halt(
923 thread_t thread,
924 boolean_t must_halt)
925{
926 thread_t cur_thread = current_thread()(active_threads[(0)]);
927 kern_return_t ret;
928 spl_t s;
929
930 if (thread == cur_thread)
931 panic("thread_halt: trying to halt current thread.");
932 /*
933 * If must_halt is FALSE, then a check must be made for
934 * a cycle of halt operations.
935 */
936 if (!must_halt) {
937 /*
938 * Grab both thread locks.
939 */
940 s = splsched();
941 if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
942 thread_lock(thread);
943 thread_lock(cur_thread);
944 }
945 else {
946 thread_lock(cur_thread);
947 thread_lock(thread);
948 }
949
950 /*
951 * If target thread is already halted, grab a hold
952 * on it and return.
953 */
954 if (thread->state & TH_HALTED0x10) {
955 thread->suspend_count++;
956 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
957 thread_unlock(thread)((void)(&(thread)->lock));
958 (void) splx(s);
959 return KERN_SUCCESS0;
960 }
961
962 /*
963 * If someone is trying to halt us, we have a potential
964 * halt cycle. Break the cycle by interrupting anyone
965 * who is trying to halt us, and causing this operation
966 * to fail; retry logic will only retry operations
967 * that cannot deadlock. (If must_halt is TRUE, this
968 * operation can never cause a deadlock.)
969 */
970 if (cur_thread->ast & AST_HALT0x1) {
971 thread_wakeup_with_result((event_t)&cur_thread->wake_active,
972 THREAD_INTERRUPTED);
973 thread_unlock(thread)((void)(&(thread)->lock));
974 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
975 (void) splx(s);
976 return KERN_FAILURE5;
977 }
978
979 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
980
981 }
982 else {
983 /*
984 * Lock thread and check whether it is already halted.
985 */
986 s = splsched();
987 thread_lock(thread);
988 if (thread->state & TH_HALTED0x10) {
989 thread->suspend_count++;
990 thread_unlock(thread)((void)(&(thread)->lock));
991 (void) splx(s);
992 return KERN_SUCCESS0;
993 }
994 }
995
996 /*
997 * Suspend thread - inline version of thread_hold() because
998 * thread is already locked.
999 */
1000 thread->suspend_count++;
1001 thread->state |= TH_SUSP0x02;
1002
1003 /*
1004 * If someone else is halting it, wait for that to complete.
1005 * Fail if wait interrupted and must_halt is false.
1006 */
1007 while ((thread->ast & AST_HALT0x1) && (!(thread->state & TH_HALTED0x10))) {
1008 thread->wake_active = TRUE((boolean_t) 1);
1009 thread_sleep((event_t) &thread->wake_active,
1010 simple_lock_addr(thread->lock)((simple_lock_t)0), TRUE((boolean_t) 1));
1011
1012 if (thread->state & TH_HALTED0x10) {
1013 (void) splx(s);
1014 return KERN_SUCCESS0;
1015 }
1016 if ((current_thread()(active_threads[(0)])->wait_result != THREAD_AWAKENED0)
1017 && !(must_halt)) {
1018 (void) splx(s);
1019 thread_release(thread);
1020 return KERN_FAILURE5;
1021 }
1022 thread_lock(thread);
1023 }
1024
1025 /*
1026 * Otherwise, have to do it ourselves.
1027 */
1028
1029 thread_ast_set(thread, AST_HALT)(thread)->ast |= (0x1);
1030
1031 while (TRUE((boolean_t) 1)) {
1032 /*
1033 * Wait for thread to stop.
1034 */
1035 thread_unlock(thread)((void)(&(thread)->lock));
1036 (void) splx(s);
1037
1038 ret = thread_dowait(thread, must_halt);
1039
1040 /*
1041 * If the dowait failed, so do we. Drop AST_HALT, and
1042 * wake up anyone else who might be waiting for it.
1043 */
1044 if (ret != KERN_SUCCESS0) {
1045 s = splsched();
1046 thread_lock(thread);
1047 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1048 thread_wakeup_with_result((event_t)&thread->wake_active,
1049 THREAD_INTERRUPTED);
1050 thread_unlock(thread)((void)(&(thread)->lock));
1051 (void) splx(s);
1052
1053 thread_release(thread);
1054 return ret;
1055 }
1056
1057 /*
1058 * Clear any interruptible wait.
1059 */
1060 clear_wait(thread, THREAD_INTERRUPTED2, TRUE((boolean_t) 1));
1061
1062 /*
1063 * If the thread's at a clean point, we're done.
1064 * Don't need a lock because it really is stopped.
1065 */
1066 if (thread->state & TH_HALTED0x10) {
1067 return KERN_SUCCESS0;
1068 }
1069
1070 /*
1071 * If the thread is at a nice continuation,
1072 * or a continuation with a cleanup routine,
1073 * call the cleanup routine.
1074 */
1075 if ((((thread->swap_func == mach_msg_continue) ||
1076 (thread->swap_func == mach_msg_receive_continue)) &&
1077 mach_msg_interrupt(thread)) ||
1078 (thread->swap_func == thread_exception_return) ||
1079 (thread->swap_func == thread_bootstrap_return)) {
1080 s = splsched();
1081 thread_lock(thread);
1082 thread->state |= TH_HALTED0x10;
1083 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1084 thread_unlock(thread)((void)(&(thread)->lock));
1085 splx(s);
1086
1087 return KERN_SUCCESS0;
1088 }
1089
1090 /*
1091 * Force the thread to stop at a clean
1092 * point, and arrange to wait for it.
1093 *
1094 * Set it running, so it can notice. Override
1095 * the suspend count. We know that the thread
1096 * is suspended and not waiting.
1097 *
1098 * Since the thread may hit an interruptible wait
1099 * before it reaches a clean point, we must force it
1100 * to wake us up when it does so. This involves some
1101 * trickery:
1102 * We mark the thread SUSPENDED so that thread_block
1103 * will suspend it and wake us up.
1104 * We mark the thread RUNNING so that it will run.
1105 * We mark the thread UN-INTERRUPTIBLE (!) so that
1106 * some other thread trying to halt or suspend it won't
1107 * take it off the run queue before it runs. Since
1108 * dispatching a thread (the tail of thread_invoke) marks
1109 * the thread interruptible, it will stop at the next
1110 * context switch or interruptible wait.
1111 */
1112
1113 s = splsched();
1114 thread_lock(thread);
1115 if ((thread->state & TH_SCHED_STATE(0x01|0x02|0x04|0x08)) != TH_SUSP0x02)
1116 panic("thread_halt");
1117 thread->state |= TH_RUN0x04 | TH_UNINT0x08;
1118 thread_setrun(thread, FALSE((boolean_t) 0));
1119
1120 /*
1121 * Continue loop and wait for thread to stop.
1122 */
1123 }
1124}
1125
1126void __attribute__((noreturn)) walking_zombie(void)
1127{
1128 panic("the zombie walks!");
1129}
1130
1131/*
1132 * Thread calls this routine on exit from the kernel when it
1133 * notices a halt request.
1134 */
1135void thread_halt_self(void)
1136{
1137 thread_t thread = current_thread()(active_threads[(0)]);
1138 spl_t s;
1139
1140 if (thread->ast & AST_TERMINATE0x2) {
1141 /*
1142 * Thread is terminating itself. Shut
1143 * down IPC, then queue it up for the
1144 * reaper thread.
1145 */
1146 ipc_thread_terminate(thread);
1147
1148 thread_hold(thread);
1149
1150 s = splsched();
1151 simple_lock(&reaper_lock);
1152 enqueue_tail(&reaper_queue, &(thread->links));
1153 simple_unlock(&reaper_lock)((void)(&reaper_lock));
1154
1155 thread_lock(thread);
1156 thread->state |= TH_HALTED0x10;
1157 thread_unlock(thread)((void)(&(thread)->lock));
1158 (void) splx(s);
1159
1160 thread_wakeup((event_t)&reaper_queue);
1161 counter(c_thread_halt_self_block++);
1162 thread_block(walking_zombie);
1163 /*NOTREACHED*/
1164 } else {
1165 /*
1166 * Thread was asked to halt - show that it
1167 * has done so.
1168 */
1169 s = splsched();
1170 thread_lock(thread);
1171 thread->state |= TH_HALTED0x10;
1172 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1173 thread_unlock(thread)((void)(&(thread)->lock));
1174 splx(s);
1175 counter(c_thread_halt_self_block++);
1176 thread_block(thread_exception_return);
1177 /*
1178 * thread_release resets TH_HALTED.
1179 */
1180 }
1181}
1182
1183/*
1184 * thread_hold:
1185 *
1186 * Suspend execution of the specified thread.
1187 * This is a recursive-style suspension of the thread, a count of
1188 * suspends is maintained.
1189 */
1190void thread_hold(
1191 thread_t thread)
1192{
1193 spl_t s;
1194
1195 s = splsched();
1196 thread_lock(thread);
1197 thread->suspend_count++;
1198 thread->state |= TH_SUSP0x02;
1199 thread_unlock(thread)((void)(&(thread)->lock));
1200 (void) splx(s);
1201}
1202
1203/*
1204 * thread_dowait:
1205 *
1206 * Wait for a thread to actually enter stopped state.
1207 *
1208 * must_halt argument indicates if this may fail on interruption.
1209 * This is FALSE only if called from thread_abort via thread_halt.
1210 */
1211kern_return_t
1212thread_dowait(
1213 thread_t thread,
1214 boolean_t must_halt)
1215{
1216 boolean_t need_wakeup;
1217 kern_return_t ret = KERN_SUCCESS0;
1218 spl_t s;
1219
1220 if (thread == current_thread()(active_threads[(0)]))
1221 panic("thread_dowait");
1222
1223 /*
1224 * If a thread is not interruptible, it may not be suspended
1225 * until it becomes interruptible. In this case, we wait for
1226 * the thread to stop itself, and indicate that we are waiting
1227 * for it to stop so that it can wake us up when it does stop.
1228 *
1229 * If the thread is interruptible, we may be able to suspend
1230 * it immediately. There are several cases:
1231 *
1232 * 1) The thread is already stopped (trivial)
1233 * 2) The thread is runnable (marked RUN and on a run queue).
1234 * We pull it off the run queue and mark it stopped.
1235 * 3) The thread is running. We wait for it to stop.
1236 */
1237
1238 need_wakeup = FALSE((boolean_t) 0);
1239 s = splsched();
1240 thread_lock(thread);
1241
1242 for (;;) {
1243 switch (thread->state & TH_SCHED_STATE(0x01|0x02|0x04|0x08)) {
1244 case TH_SUSP0x02:
1245 case TH_WAIT0x01 | TH_SUSP0x02:
1246 /*
1247 * Thread is already suspended, or sleeping in an
1248 * interruptible wait. We win!
1249 */
1250 break;
1251
1252 case TH_RUN0x04 | TH_SUSP0x02:
1253 /*
1254 * The thread is interruptible. If we can pull
1255 * it off a runq, stop it here.
1256 */
1257 if (rem_runq(thread) != RUN_QUEUE_NULL((run_queue_t) 0)) {
1258 thread->state &= ~TH_RUN0x04;
1259 need_wakeup = thread->wake_active;
1260 thread->wake_active = FALSE((boolean_t) 0);
1261 break;
1262 }
1263#if NCPUS1 > 1
1264 /*
1265 * The thread must be running, so make its
1266 * processor execute ast_check(). This
1267 * should cause the thread to take an ast and
1268 * context switch to suspend for us.
1269 */
1270 cause_ast_check(thread->last_processor);
1271#endif /* NCPUS > 1 */
1272
1273 /*
1274 * Fall through to wait for thread to stop.
1275 */
1276
1277 case TH_RUN0x04 | TH_SUSP0x02 | TH_UNINT0x08:
1278 case TH_RUN0x04 | TH_WAIT0x01 | TH_SUSP0x02:
1279 case TH_RUN0x04 | TH_WAIT0x01 | TH_SUSP0x02 | TH_UNINT0x08:
1280 case TH_WAIT0x01 | TH_SUSP0x02 | TH_UNINT0x08:
1281 /*
1282 * Wait for the thread to stop, or sleep interruptibly
1283 * (thread_block will stop it in the latter case).
1284 * Check for failure if interrupted.
1285 */
1286 thread->wake_active = TRUE((boolean_t) 1);
1287 thread_sleep((event_t) &thread->wake_active,
1288 simple_lock_addr(thread->lock)((simple_lock_t)0), TRUE((boolean_t) 1));
1289 thread_lock(thread);
1290 if ((current_thread()(active_threads[(0)])->wait_result != THREAD_AWAKENED0) &&
1291 !must_halt) {
1292 ret = KERN_FAILURE5;
1293 break;
1294 }
1295
1296 /*
1297 * Repeat loop to check thread`s state.
1298 */
1299 continue;
1300 }
1301 /*
1302 * Thread is stopped at this point.
1303 */
1304 break;
1305 }
1306
1307 thread_unlock(thread)((void)(&(thread)->lock));
1308 (void) splx(s);
1309
1310 if (need_wakeup)
1311 thread_wakeup((event_t) &thread->wake_active);
1312
1313 return ret;
1314}
1315
1316void thread_release(
1317 thread_t thread)
1318{
1319 spl_t s;
1320
1321 s = splsched();
1322 thread_lock(thread);
1323 if (--thread->suspend_count == 0) {
1324 thread->state &= ~(TH_SUSP0x02 | TH_HALTED0x10);
1325 if ((thread->state & (TH_WAIT0x01 | TH_RUN0x04)) == 0) {
1326 /* was only suspended */
1327 thread->state |= TH_RUN0x04;
1328 thread_setrun(thread, TRUE((boolean_t) 1));
1329 }
1330 }
1331 thread_unlock(thread)((void)(&(thread)->lock));
1332 (void) splx(s);
1333}
1334
1335kern_return_t thread_suspend(
1336 thread_t thread)
1337{
1338 boolean_t hold;
1339 spl_t spl;
1340
1341 if (thread == THREAD_NULL((thread_t) 0))
1342 return KERN_INVALID_ARGUMENT4;
1343
1344 hold = FALSE((boolean_t) 0);
1345 spl = splsched();
1346 thread_lock(thread);
1347 /* Wait for thread to get interruptible */
1348 while (thread->state & TH_UNINT0x08) {
1349 assert_wait(&thread->state, TRUE((boolean_t) 1));
1350 thread_unlock(thread)((void)(&(thread)->lock));
1351 thread_block(NULL((void *) 0));
1352 thread_lock(thread);
1353 }
1354 if (thread->user_stop_count++ == 0) {
1355 hold = TRUE((boolean_t) 1);
1356 thread->suspend_count++;
1357 thread->state |= TH_SUSP0x02;
1358 }
1359 thread_unlock(thread)((void)(&(thread)->lock));
1360 (void) splx(spl);
1361
1362 /*
1363 * Now wait for the thread if necessary.
1364 */
1365 if (hold) {
1366 if (thread == current_thread()(active_threads[(0)])) {
1367 /*
1368 * We want to call thread_block on our way out,
1369 * to stop running.
1370 */
1371 spl = splsched();
1372 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
1373 (void) splx(spl);
1374 } else
1375 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1376 }
1377 return KERN_SUCCESS0;
1378}
1379
1380
1381kern_return_t thread_resume(
1382 thread_t thread)
1383{
1384 kern_return_t ret;
1385 spl_t s;
1386
1387 if (thread == THREAD_NULL((thread_t) 0))
1388 return KERN_INVALID_ARGUMENT4;
1389
1390 ret = KERN_SUCCESS0;
1391
1392 s = splsched();
1393 thread_lock(thread);
1394 if (thread->user_stop_count > 0) {
1395 if (--thread->user_stop_count == 0) {
1396 if (--thread->suspend_count == 0) {
1397 thread->state &= ~(TH_SUSP0x02 | TH_HALTED0x10);
1398 if ((thread->state & (TH_WAIT0x01 | TH_RUN0x04)) == 0) {
1399 /* was only suspended */
1400 thread->state |= TH_RUN0x04;
1401 thread_setrun(thread, TRUE((boolean_t) 1));
1402 }
1403 }
1404 }
1405 }
1406 else {
1407 ret = KERN_FAILURE5;
1408 }
1409
1410 thread_unlock(thread)((void)(&(thread)->lock));
1411 (void) splx(s);
1412
1413 return ret;
1414}
1415
1416/*
1417 * Return thread's machine-dependent state.
1418 */
1419kern_return_t thread_get_state(
1420 thread_t thread,
1421 int flavor,
1422 thread_state_t old_state, /* pointer to OUT array */
1423 natural_t *old_state_count) /*IN/OUT*/
1424{
1425 kern_return_t ret;
1426
1427 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1428 return KERN_INVALID_ARGUMENT4;
1429 }
1430
1431 thread_hold(thread);
1432 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1433
1434 ret = thread_getstatus(thread, flavor, old_state, old_state_count);
1435
1436 thread_release(thread);
1437 return ret;
1438}
1439
1440/*
1441 * Change thread's machine-dependent state.
1442 */
1443kern_return_t thread_set_state(
1444 thread_t thread,
1445 int flavor,
1446 thread_state_t new_state,
1447 natural_t new_state_count)
1448{
1449 kern_return_t ret;
1450
1451 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1452 return KERN_INVALID_ARGUMENT4;
1453 }
1454
1455 thread_hold(thread);
1456 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1457
1458 ret = thread_setstatus(thread, flavor, new_state, new_state_count);
1459
1460 thread_release(thread);
1461 return ret;
1462}
1463
1464kern_return_t thread_info(
1465 thread_t thread,
1466 int flavor,
1467 thread_info_t thread_info_out, /* pointer to OUT array */
1468 natural_t *thread_info_count) /*IN/OUT*/
1469{
1470 int state, flags;
1471 spl_t s;
1472
1473 if (thread == THREAD_NULL((thread_t) 0))
1474 return KERN_INVALID_ARGUMENT4;
1475
1476 if (flavor == THREAD_BASIC_INFO1) {
1477 thread_basic_info_t basic_info;
1478
1479 /* Allow *thread_info_count to be one smaller than the
1480 usual amount, because creation_time is a new member
1481 that some callers might not know about. */
1482
1483 if (*thread_info_count < THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t)) - 1) {
1484 return KERN_INVALID_ARGUMENT4;
1485 }
1486
1487 basic_info = (thread_basic_info_t) thread_info_out;
1488
1489 s = splsched();
1490 thread_lock(thread);
1491
1492 /*
1493 * Update lazy-evaluated scheduler info because someone wants it.
1494 */
1495 if ((thread->state & TH_RUN0x04) == 0 &&
1496 thread->sched_stamp != sched_tick)
1497 update_priority(thread);
1498
1499 /* fill in info */
1500
1501 thread_read_times(thread,
1502 &basic_info->user_time,
1503 &basic_info->system_time);
1504 basic_info->base_priority = thread->priority;
1505 basic_info->cur_priority = thread->sched_pri;
1506 basic_info->creation_time = thread->creation_time;
1507
1508 /*
1509 * To calculate cpu_usage, first correct for timer rate,
1510 * then for 5/8 ageing. The correction factor [3/5] is
1511 * (1/(5/8) - 1).
1512 */
1513 basic_info->cpu_usage = thread->cpu_usage /
1514 (TIMER_RATE1000000/TH_USAGE_SCALE1000);
1515 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1516#if SIMPLE_CLOCK0
1517 /*
1518 * Clock drift compensation.
1519 */
1520 basic_info->cpu_usage =
1521 (basic_info->cpu_usage * 1000000)/sched_usec;
1522#endif /* SIMPLE_CLOCK */
1523
1524 flags = 0;
1525 if (thread->state & TH_SWAPPED0x0100)
1526 flags |= TH_FLAGS_SWAPPED0x1;
1527 if (thread->state & TH_IDLE0x80)
1528 flags |= TH_FLAGS_IDLE0x2;
1529
1530 if (thread->state & TH_HALTED0x10)
1531 state = TH_STATE_HALTED5;
1532 else
1533 if (thread->state & TH_RUN0x04)
1534 state = TH_STATE_RUNNING1;
1535 else
1536 if (thread->state & TH_UNINT0x08)
1537 state = TH_STATE_UNINTERRUPTIBLE4;
1538 else
1539 if (thread->state & TH_SUSP0x02)
1540 state = TH_STATE_STOPPED2;
1541 else
1542 if (thread->state & TH_WAIT0x01)
1543 state = TH_STATE_WAITING3;
1544 else
1545 state = 0; /* ? */
1546
1547 basic_info->run_state = state;
1548 basic_info->flags = flags;
1549 basic_info->suspend_count = thread->user_stop_count;
1550 if (state == TH_STATE_RUNNING1)
1551 basic_info->sleep_time = 0;
1552 else
1553 basic_info->sleep_time = sched_tick - thread->sched_stamp;
1554
1555 thread_unlock(thread)((void)(&(thread)->lock));
1556 splx(s);
1557
1558 if (*thread_info_count > THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t)))
1559 *thread_info_count = THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t));
1560 return KERN_SUCCESS0;
1561 }
1562 else if (flavor == THREAD_SCHED_INFO2) {
1563 thread_sched_info_t sched_info;
1564
1565 if (*thread_info_count < THREAD_SCHED_INFO_COUNT(sizeof(thread_sched_info_data_t) / sizeof(natural_t))) {
1566 return KERN_INVALID_ARGUMENT4;
1567 }
1568
1569 sched_info = (thread_sched_info_t) thread_info_out;
1570
1571 s = splsched();
1572 thread_lock(thread);
1573
1574#if MACH_FIXPRI1
1575 sched_info->policy = thread->policy;
1576 if (thread->policy == POLICY_FIXEDPRI2) {
1577 sched_info->data = (thread->sched_data * tick)/1000;
1578 }
1579 else {
1580 sched_info->data = 0;
1581 }
1582#else /* MACH_FIXPRI */
1583 sched_info->policy = POLICY_TIMESHARE1;
1584 sched_info->data = 0;
1585#endif /* MACH_FIXPRI */
1586
1587 sched_info->base_priority = thread->priority;
1588 sched_info->max_priority = thread->max_priority;
1589 sched_info->cur_priority = thread->sched_pri;
1590
1591 sched_info->depressed = (thread->depress_priority >= 0);
1592 sched_info->depress_priority = thread->depress_priority;
1593
1594 thread_unlock(thread)((void)(&(thread)->lock));
1595 splx(s);
1596
1597 *thread_info_count = THREAD_SCHED_INFO_COUNT(sizeof(thread_sched_info_data_t) / sizeof(natural_t));
1598 return KERN_SUCCESS0;
1599 }
1600
1601 return KERN_INVALID_ARGUMENT4;
1602}
1603
1604kern_return_t thread_abort(
1605 thread_t thread)
1606{
1607 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1608 return KERN_INVALID_ARGUMENT4;
1609 }
1610
1611 /*
1612 *
1613 * clear it of an event wait
1614 */
1615 evc_notify_abort(thread);
1616
1617 /*
1618 * Try to force the thread to a clean point
1619 * If the halt operation fails return KERN_ABORTED.
1620 * ipc code will convert this to an ipc interrupted error code.
1621 */
1622 if (thread_halt(thread, FALSE((boolean_t) 0)) != KERN_SUCCESS0)
1623 return KERN_ABORTED14;
1624
1625 /*
1626 * If the thread was in an exception, abort that too.
1627 */
1628 mach_msg_abort_rpc(thread);
1629
1630 /*
1631 * Then set it going again.
1632 */
1633 thread_release(thread);
1634
1635 /*
1636 * Also abort any depression.
1637 */
1638 if (thread->depress_priority != -1)
1639 thread_depress_abort(thread);
1640
1641 return KERN_SUCCESS0;
1642}
1643
1644/*
1645 * thread_start:
1646 *
1647 * Start a thread at the specified routine.
1648 * The thread must be in a swapped state.
1649 */
1650
1651void
1652thread_start(
1653 thread_t thread,
1654 continuation_t start)
1655{
1656 thread->swap_func = start;
1657}
1658
1659/*
1660 * kernel_thread:
1661 *
1662 * Start up a kernel thread in the specified task.
1663 */
1664
1665thread_t kernel_thread(
1666 task_t task,
1667 continuation_t start,
1668 void * arg)
1669{
1670 thread_t thread;
1
Variable 'thread' declared without an initial value
1671
1672 (void) thread_create(task, &thread);
2
Calling 'thread_create'
29
Returning from 'thread_create'
1673 /* release "extra" ref that thread_create gave us */
1674 thread_deallocate(thread);
30
Function call argument is an uninitialized value
1675 thread_start(thread, start);
1676 thread->ith_othersaved.other = arg;
1677
1678 /*
1679 * We ensure that the kernel thread starts with a stack.
1680 * The swapin mechanism might not be operational yet.
1681 */
1682 thread_doswapin(thread);
1683 thread->max_priority = BASEPRI_SYSTEM6;
1684 thread->priority = BASEPRI_SYSTEM6;
1685 thread->sched_pri = BASEPRI_SYSTEM6;
1686 (void) thread_resume(thread);
1687 return thread;
1688}
1689
1690/*
1691 * reaper_thread:
1692 *
1693 * This kernel thread runs forever looking for threads to destroy
1694 * (when they request that they be destroyed, of course).
1695 */
1696void __attribute__((noreturn)) reaper_thread_continue(void)
1697{
1698 for (;;) {
1699 thread_t thread;
1700 spl_t s;
1701
1702 s = splsched();
1703 simple_lock(&reaper_lock);
1704
1705 while ((thread = (thread_t) dequeue_head(&reaper_queue))
1706 != THREAD_NULL((thread_t) 0)) {
1707 simple_unlock(&reaper_lock)((void)(&reaper_lock));
1708 (void) splx(s);
1709
1710 (void) thread_dowait(thread, TRUE((boolean_t) 1)); /* may block */
1711 thread_deallocate(thread); /* may block */
1712
1713 s = splsched();
1714 simple_lock(&reaper_lock);
1715 }
1716
1717 assert_wait((event_t) &reaper_queue, FALSE((boolean_t) 0));
1718 simple_unlock(&reaper_lock)((void)(&reaper_lock));
1719 (void) splx(s);
1720 counter(c_reaper_thread_block++);
1721 thread_block(reaper_thread_continue);
1722 }
1723}
1724
1725void reaper_thread(void)
1726{
1727 reaper_thread_continue();
1728 /*NOTREACHED*/
1729}
1730
1731#if MACH_HOST0
1732/*
1733 * thread_assign:
1734 *
1735 * Change processor set assignment.
1736 * Caller must hold an extra reference to the thread (if this is
1737 * called directly from the ipc interface, this is an operation
1738 * in progress reference). Caller must hold no locks -- this may block.
1739 */
1740
1741kern_return_t
1742thread_assign(
1743 thread_t thread,
1744 processor_set_t new_pset)
1745{
1746 if (thread == THREAD_NULL((thread_t) 0) || new_pset == PROCESSOR_SET_NULL((processor_set_t) 0)) {
1747 return KERN_INVALID_ARGUMENT4;
1748 }
1749
1750 thread_freeze(thread);
1751 thread_doassign(thread, new_pset, TRUE((boolean_t) 1));
1752
1753 return KERN_SUCCESS0;
1754}
1755
1756/*
1757 * thread_freeze:
1758 *
1759 * Freeze thread's assignment. Prelude to assigning thread.
1760 * Only one freeze may be held per thread.
1761 */
1762void
1763thread_freeze(
1764 thread_t thread)
1765{
1766 spl_t s;
1767 /*
1768 * Freeze the assignment, deferring to a prior freeze.
1769 */
1770 s = splsched();
1771 thread_lock(thread);
1772 while (thread->may_assign == FALSE((boolean_t) 0)) {
1773 thread->assign_active = TRUE((boolean_t) 1);
1774 thread_sleep((event_t) &thread->assign_active,
1775 simple_lock_addr(thread->lock)((simple_lock_t)0), FALSE((boolean_t) 0));
1776 thread_lock(thread);
1777 }
1778 thread->may_assign = FALSE((boolean_t) 0);
1779 thread_unlock(thread)((void)(&(thread)->lock));
1780 (void) splx(s);
1781
1782}
1783
1784/*
1785 * thread_unfreeze: release freeze on thread's assignment.
1786 */
1787void
1788thread_unfreeze(
1789 thread_t thread)
1790{
1791 spl_t s;
1792
1793 s = splsched();
1794 thread_lock(thread);
1795 thread->may_assign = TRUE((boolean_t) 1);
1796 if (thread->assign_active) {
1797 thread->assign_active = FALSE((boolean_t) 0);
1798 thread_wakeup((event_t)&thread->assign_active);
1799 }
1800 thread_unlock(thread)((void)(&(thread)->lock));
1801 splx(s);
1802}
1803
1804/*
1805 * thread_doassign:
1806 *
1807 * Actually do thread assignment. thread_will_assign must have been
1808 * called on the thread. release_freeze argument indicates whether
1809 * to release freeze on thread.
1810 */
1811
1812void
1813thread_doassign(
1814 thread_t thread,
1815 processor_set_t new_pset,
1816 boolean_t release_freeze)
1817{
1818 processor_set_t pset;
1819 boolean_t old_empty, new_empty;
1820 boolean_t recompute_pri = FALSE((boolean_t) 0);
1821 spl_t s;
1822
1823 /*
1824 * Check for silly no-op.
1825 */
1826 pset = thread->processor_set;
1827 if (pset == new_pset) {
1828 if (release_freeze)
1829 thread_unfreeze(thread);
1830 return;
1831 }
1832 /*
1833 * Suspend the thread and stop it if it's not the current thread.
1834 */
1835 thread_hold(thread);
1836 if (thread != current_thread()(active_threads[(0)]))
1837 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1838
1839 /*
1840 * Lock both psets now, use ordering to avoid deadlocks.
1841 */
1842Restart:
1843 if ((vm_offset_t)pset < (vm_offset_t)new_pset) {
1844 pset_lock(pset);
1845 pset_lock(new_pset);
1846 }
1847 else {
1848 pset_lock(new_pset);
1849 pset_lock(pset);
1850 }
1851
1852 /*
1853 * Check if new_pset is ok to assign to. If not, reassign
1854 * to default_pset.
1855 */
1856 if (!new_pset->active) {
1857 pset_unlock(pset)((void)(&(pset)->lock));
1858 pset_unlock(new_pset)((void)(&(new_pset)->lock));
1859 new_pset = &default_pset;
1860 goto Restart;
1861 }
1862
1863 pset_reference(new_pset);
1864
1865 /*
1866 * Grab the thread lock and move the thread.
1867 * Then drop the lock on the old pset and the thread's
1868 * reference to it.
1869 */
1870 s = splsched();
1871 thread_lock(thread);
1872
1873 thread_change_psets(thread, pset, new_pset);
1874
1875 old_empty = pset->empty;
1876 new_empty = new_pset->empty;
1877
1878 pset_unlock(pset)((void)(&(pset)->lock));
1879
1880 /*
1881 * Reset policy and priorities if needed.
1882 */
1883#if MACH_FIXPRI1
1884 if (thread->policy & new_pset->policies == 0) {
1885 thread->policy = POLICY_TIMESHARE1;
1886 recompute_pri = TRUE((boolean_t) 1);
1887 }
1888#endif /* MACH_FIXPRI */
1889
1890 if (thread->max_priority < new_pset->max_priority) {
1891 thread->max_priority = new_pset->max_priority;
1892 if (thread->priority < thread->max_priority) {
1893 thread->priority = thread->max_priority;
1894 recompute_pri = TRUE((boolean_t) 1);
1895 }
1896 else {
1897 if ((thread->depress_priority >= 0) &&
1898 (thread->depress_priority < thread->max_priority)) {
1899 thread->depress_priority = thread->max_priority;
1900 }
1901 }
1902 }
1903
1904 pset_unlock(new_pset)((void)(&(new_pset)->lock));
1905
1906 if (recompute_pri)
1907 compute_priority(thread, TRUE((boolean_t) 1));
1908
1909 if (release_freeze) {
1910 thread->may_assign = TRUE((boolean_t) 1);
1911 if (thread->assign_active) {
1912 thread->assign_active = FALSE((boolean_t) 0);
1913 thread_wakeup((event_t)&thread->assign_active);
1914 }
1915 }
1916
1917 thread_unlock(thread)((void)(&(thread)->lock));
1918 splx(s);
1919
1920 pset_deallocate(pset);
1921
1922 /*
1923 * Figure out hold status of thread. Threads assigned to empty
1924 * psets must be held. Therefore:
1925 * If old pset was empty release its hold.
1926 * Release our hold from above unless new pset is empty.
1927 */
1928
1929 if (old_empty)
1930 thread_release(thread);
1931 if (!new_empty)
1932 thread_release(thread);
1933
1934 /*
1935 * If current_thread is assigned, context switch to force
1936 * assignment to happen. This also causes hold to take
1937 * effect if the new pset is empty.
1938 */
1939 if (thread == current_thread()(active_threads[(0)])) {
1940 s = splsched();
1941 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
1942 (void) splx(s);
1943 }
1944}
1945#else /* MACH_HOST */
1946kern_return_t
1947thread_assign(
1948 thread_t thread,
1949 processor_set_t new_pset)
1950{
1951 return KERN_FAILURE5;
1952}
1953#endif /* MACH_HOST */
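
The MACH_HOST path of thread_assign above takes two processor-set locks at once and avoids an ABBA deadlock by always locking the set at the lower address first, then re-checks new_pset->active and restarts from Restart: if the target set went away while unlocked. The following is a minimal userland sketch of that address-ordering discipline, assuming POSIX mutexes and a hypothetical struct resource in place of the kernel's processor sets; it is illustrative, not the pset_lock implementation.

#include <pthread.h>
#include <stdint.h>

/* Hypothetical lockable object standing in for a processor set. */
struct resource {
	pthread_mutex_t lock;
	int active;
};

/*
 * Take both locks in ascending address order, mirroring the
 * (vm_offset_t)pset < (vm_offset_t)new_pset comparison above, so that
 * two concurrent callers can never hold them in opposite order.
 */
static void lock_pair(struct resource *a, struct resource *b)
{
	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void unlock_pair(struct resource *a, struct resource *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct resource a = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct resource b = { PTHREAD_MUTEX_INITIALIZER, 1 };

	lock_pair(&a, &b);	/* acquisition order is the same either way round */
	unlock_pair(&a, &b);
	return 0;
}
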
1954
1955/*
1956 * thread_assign_default:
1957 *
1958 * Special version of thread_assign for assigning threads to default
1959 * processor set.
1960 */
1961kern_return_t
1962thread_assign_default(
1963 thread_t thread)
1964{
1965 return thread_assign(thread, &default_pset);
1966}
1967
1968/*
1969 * thread_get_assignment
1970 *
1971 * Return current assignment for this thread.
1972 */
1973kern_return_t thread_get_assignment(
1974 thread_t thread,
1975 processor_set_t *pset)
1976{
1977 *pset = thread->processor_set;
1978 pset_reference(*pset);
1979 return KERN_SUCCESS0;
1980}
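
thread_get_assignment hands a raw processor_set pointer back to its caller, so it first takes a reference with pset_reference; the caller is expected to drop that reference later with pset_deallocate. A hedged sketch of the same take-a-reference-before-publishing convention, using a hypothetical refcounted object and C11 atomics rather than the kernel's pset code:

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical refcounted object standing in for a processor_set. */
struct object {
	atomic_int ref_count;
	/* ... payload ... */
};

/* Take a reference before handing the pointer out (like pset_reference). */
static struct object *object_get(struct object *obj)
{
	atomic_fetch_add(&obj->ref_count, 1);
	return obj;
}

/* Drop a reference, freeing on the last one (like pset_deallocate). */
static void object_put(struct object *obj)
{
	if (atomic_fetch_sub(&obj->ref_count, 1) == 1)
		free(obj);
}

int main(void)
{
	struct object *o = malloc(sizeof(*o));

	atomic_init(&o->ref_count, 1);		/* creator's reference */
	struct object *user = object_get(o);	/* reference returned to a caller */
	object_put(user);			/* caller finished */
	object_put(o);				/* creator finished; frees here */
	return 0;
}
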
1981
1982/*
1983 * thread_priority:
1984 *
1985 * Set priority (and possibly max priority) for thread.
1986 */
1987kern_return_t
1988thread_priority(
1989 thread_t thread,
1990 int priority,
1991 boolean_t set_max)
1992{
1993 spl_t s;
1994 kern_return_t ret = KERN_SUCCESS0;
1995
1996 if ((thread == THREAD_NULL((thread_t) 0)) || invalid_pri(priority)(((priority) < 0) || ((priority) >= 50)))
1997 return KERN_INVALID_ARGUMENT4;
1998
1999 s = splsched();
2000 thread_lock(thread);
2001
2002 /*
2003 * Check for violation of max priority
2004 */
2005 if (priority < thread->max_priority) {
2006 ret = KERN_FAILURE5;
2007 }
2008 else {
2009 /*
2010 * Set priorities. If a depression is in progress,
2011 * change the priority to restore.
2012 */
2013 if (thread->depress_priority >= 0) {
2014 thread->depress_priority = priority;
2015 }
2016 else {
2017 thread->priority = priority;
2018 compute_priority(thread, TRUE((boolean_t) 1));
2019 }
2020
2021 if (set_max)
2022 thread->max_priority = priority;
2023 }
2024 thread_unlock(thread)((void)(&(thread)->lock));
2025 (void) splx(s);
2026
2027 return ret;
2028}
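
Two details of thread_priority are easy to miss: Mach priorities are numerically inverted, so a value below max_priority is a violation rather than a harmless lower setting, and while a priority depression is in effect only the value to be restored is updated. Below is a standalone model of that decision logic, with hypothetical field and constant names (the 0..49 range comes from the invalid_pri expansion above); it is a sketch, not the kernel routine.

#include <assert.h>

#define NRQS 50		/* one past the largest valid priority, per invalid_pri() */

/* Illustrative model only; fields loosely mirror struct thread. */
struct fake_thread {
	int priority;		/* current priority (smaller = more urgent) */
	int max_priority;	/* numerically smallest value this thread may use */
	int depress_priority;	/* priority to restore after depression, or -1 */
};

/* Returns 0 on success, -1 for an invalid or over-privileged priority. */
static int set_priority(struct fake_thread *t, int pri)
{
	if (pri < 0 || pri >= NRQS)
		return -1;			/* invalid_pri() */
	if (pri < t->max_priority)
		return -1;			/* more urgent than the allowed maximum */
	if (t->depress_priority >= 0)
		t->depress_priority = pri;	/* depressed: only change what is restored */
	else
		t->priority = pri;		/* takes effect now */
	return 0;
}

int main(void)
{
	struct fake_thread t = { .priority = 25, .max_priority = 12, .depress_priority = -1 };

	assert(set_priority(&t, 6) == -1);			/* 6 is above the max of 12 */
	assert(set_priority(&t, 20) == 0 && t.priority == 20);
	t.depress_priority = t.priority;			/* pretend a depression started */
	assert(set_priority(&t, 18) == 0 && t.priority == 20 && t.depress_priority == 18);
	return 0;
}
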
2029
2030/*
2031 * thread_set_own_priority:
2032 *
2033 * Internal use only; sets the priority of the calling thread.
2034 * Will adjust max_priority if necessary.
2035 */
2036void
2037thread_set_own_priority(
2038 int priority)
2039{
2040 spl_t s;
2041 thread_t thread = current_thread()(active_threads[(0)]);
2042
2043 s = splsched();
2044 thread_lock(thread);
2045
2046 if (priority < thread->max_priority)
2047 thread->max_priority = priority;
2048 thread->priority = priority;
2049 compute_priority(thread, TRUE((boolean_t) 1));
2050
2051 thread_unlock(thread)((void)(&(thread)->lock));
2052 (void) splx(s);
2053}
2054
2055/*
2056 * thread_max_priority:
2057 *
2058 * Reset the max priority for a thread.
2059 */
2060kern_return_t
2061thread_max_priority(
2062 thread_t thread,
2063 processor_set_t pset,
2064 int max_priority)
2065{
2066 spl_t s;
2067 kern_return_t ret = KERN_SUCCESS0;
2068
2069 if ((thread == THREAD_NULL((thread_t) 0)) || (pset == PROCESSOR_SET_NULL((processor_set_t) 0)) ||
2070 invalid_pri(max_priority)(((max_priority) < 0) || ((max_priority) >= 50)))
2071 return KERN_INVALID_ARGUMENT4;
2072
2073 s = splsched();
2074 thread_lock(thread);
2075
2076#if MACH_HOST0
2077 /*
2078 * Check for wrong processor set.
2079 */
2080 if (pset != thread->processor_set) {
2081 ret = KERN_FAILURE5;
2082 }
2083 else {
2084#endif /* MACH_HOST */
2085 thread->max_priority = max_priority;
2086
2087 /*
2088 * Reset priority if it violates new max priority
2089 */
2090 if (max_priority > thread->priority) {
2091 thread->priority = max_priority;
2092
2093 compute_priority(thread, TRUE((boolean_t) 1));
2094 }
2095 else {
2096 if (thread->depress_priority >= 0 &&
2097 max_priority > thread->depress_priority)
2098 thread->depress_priority = max_priority;
2099 }
2100#if MACH_HOST0
2101 }
2102#endif /* MACH_HOST */
2103
2104 thread_unlock(thread)((void)(&(thread)->lock));
2105 (void) splx(s);
2106
2107 return ret;
2108}
2109
2110/*
2111 * thread_policy:
2112 *
2113 * Set scheduling policy for thread.
2114 */
2115kern_return_t
2116thread_policy(
2117 thread_t thread,
2118 int policy,
2119 int data)
2120{
2121#if MACH_FIXPRI1
2122 kern_return_t ret = KERN_SUCCESS0;
2123 int temp;
2124 spl_t s;
2125#endif /* MACH_FIXPRI */
2126
2127 if ((thread == THREAD_NULL((thread_t) 0)) || invalid_policy(policy)(((policy) <= 0) || ((policy) > 2)))
2128 return KERN_INVALID_ARGUMENT4;
2129
2130#if MACH_FIXPRI1
2131 s = splsched();
2132 thread_lock(thread);
2133
2134 /*
2135 * Check if changing policy.
2136 */
2137 if (policy == thread->policy) {
2138 /*
2139 * Just changing data. This is meaningless for
2140 * timesharing, quantum for fixed priority (but
2141 * has no effect until current quantum runs out).
2142 */
2143 if (policy == POLICY_FIXEDPRI2) {
2144 temp = data * 1000;
2145 if (temp % tick)
2146 temp += tick;
2147 thread->sched_data = temp/tick;
2148 }
2149 }
2150 else {
2151 /*
2152 * Changing policy. Check if new policy is allowed.
2153 */
2154 if ((thread->processor_set->policies & policy) == 0) {
2155 ret = KERN_FAILURE5;
2156 }
2157 else {
2158 /*
2159 * Changing policy. Save data and calculate new
2160 * priority.
2161 */
2162 thread->policy = policy;
2163 if (policy == POLICY_FIXEDPRI2) {
2164 temp = data * 1000;
2165 if (temp % tick)
2166 temp += tick;
2167 thread->sched_data = temp/tick;
2168 }
2169 compute_priority(thread, TRUE((boolean_t) 1));
2170 }
2171 }
2172 thread_unlock(thread)((void)(&(thread)->lock));
2173 (void) splx(s);
2174
2175 return ret;
2176#else /* MACH_FIXPRI */
2177 if (policy == POLICY_TIMESHARE1)
2178 return KERN_SUCCESS0;
2179 else
2180 return KERN_FAILURE5;
2181#endif /* MACH_FIXPRI */
2182}
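
For POLICY_FIXEDPRI, thread_policy converts the caller's quantum from milliseconds into whole clock ticks, rounding up so the granted quantum is never shorter than requested; data * 1000 turns milliseconds into microseconds and tick is the clock period in microseconds. The arithmetic is a ceiling division, checked in isolation below (the 100 Hz tick value is an assumption for the example, not taken from this file):

#include <assert.h>

/*
 * Milliseconds to clock ticks, rounded up, as thread_policy() computes
 * sched_data: if the quantum is not a whole number of ticks, adding one
 * tick before the integer division bumps the result to the ceiling.
 */
static int quantum_to_ticks(int ms, int tick_usec)
{
	int usec = ms * 1000;		/* milliseconds -> microseconds */

	if (usec % tick_usec)
		usec += tick_usec;
	return usec / tick_usec;
}

int main(void)
{
	assert(quantum_to_ticks(30, 10000) == 3);	/* exact multiple of a 10 ms tick */
	assert(quantum_to_ticks(25, 10000) == 3);	/* 2.5 ticks rounds up to 3 */
	return 0;
}
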
2183
2184/*
2185 * thread_wire:
2186 *
2187 * Specify that the target thread must always be able
2188 * to run and to allocate memory.
2189 */
2190kern_return_t
2191thread_wire(
2192 host_t host,
2193 thread_t thread,
2194 boolean_t wired)
2195{
2196 spl_t s;
2197
2198 if (host == HOST_NULL((host_t)0))
2199 return KERN_INVALID_ARGUMENT4;
2200
2201 if (thread == THREAD_NULL((thread_t) 0))
2202 return KERN_INVALID_ARGUMENT4;
2203
2204 /*
2205 * This implementation only works for the current thread.
2206 * See stack_privilege.
2207 */
2208 if (thread != current_thread()(active_threads[(0)]))
2209 return KERN_INVALID_ARGUMENT4;
2210
2211 s = splsched();
2212 thread_lock(thread);
2213
2214 if (wired) {
2215 thread->vm_privilege = TRUE((boolean_t) 1);
2216 stack_privilege(thread);
2217 }
2218 else {
2219 thread->vm_privilege = FALSE((boolean_t) 0);
2220/*XXX stack_unprivilege(thread); */
2221 thread->stack_privilege = 0;
2222 }
2223
2224 thread_unlock(thread)((void)(&(thread)->lock));
2225 splx(s);
2226
2227 return KERN_SUCCESS0;
2228}
2229
2230/*
2231 * thread_collect_scan:
2232 *
2233 * Attempt to free resources owned by threads.
2234 * pcb_collect doesn't do anything yet.
2235 */
2236
2237void thread_collect_scan(void)
2238{
2239#if 0
2240 register thread_t thread, prev_thread;
2241 processor_set_t pset, prev_pset;
2242
2243 prev_thread = THREAD_NULL((thread_t) 0);
2244 prev_pset = PROCESSOR_SET_NULL((processor_set_t) 0);
2245
2246 simple_lock(&all_psets_lock);
2247 queue_iterate(&all_psets, pset, processor_set_t, all_psets)for ((pset) = (processor_set_t) ((&all_psets)->next); !(((&all_psets)) == ((queue_entry_t)(pset))); (pset) = (processor_set_t) ((&(pset)->all_psets)->next)) {
2248 pset_lock(pset);
2249 queue_iterate(&pset->threads, thread, thread_t, pset_threads)for ((thread) = (thread_t) ((&pset->threads)->next); !(((&pset->threads)) == ((queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread)->pset_threads)->next)) {
2250 spl_t s = splsched();
2251 thread_lock(thread);
2252
2253 /*
2254 * Only collect threads which are
2255 * not runnable and are swapped.
2256 */
2257
2258 if ((thread->state & (TH_RUN0x04|TH_SWAPPED0x0100))
2259 == TH_SWAPPED0x0100) {
2260 thread->ref_count++;
2261 thread_unlock(thread)((void)(&(thread)->lock));
2262 (void) splx(s);
2263 pset->ref_count++;
2264 pset_unlock(pset)((void)(&(pset)->lock));
2265 simple_unlock(&all_psets_lock)((void)(&all_psets_lock));
2266
2267 pcb_collect(thread);
2268
2269 if (prev_thread != THREAD_NULL((thread_t) 0))
2270 thread_deallocate(prev_thread);
2271 prev_thread = thread;
2272
2273 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
2274 pset_deallocate(prev_pset);
2275 prev_pset = pset;
2276
2277 simple_lock(&all_psets_lock);
2278 pset_lock(pset);
2279 } else {
2280 thread_unlock(thread)((void)(&(thread)->lock));
2281 (void) splx(s);
2282 }
2283 }
2284 pset_unlock(pset)((void)(&(pset)->lock));
2285 }
2286 simple_unlock(&all_psets_lock)((void)(&all_psets_lock));
2287
2288 if (prev_thread != THREAD_NULL((thread_t) 0))
2289 thread_deallocate(prev_thread);
2290 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
2291 pset_deallocate(prev_pset);
2292#endif /* 0 */
2293}
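
Even though it is compiled out with #if 0, thread_collect_scan illustrates a useful idiom: pcb_collect must run with every lock dropped, so the scan takes a reference on the element it is standing on before unlocking, and only releases the previous element's reference once it has moved on, keeping the iteration cursor valid across the unlocked region. A simplified userland sketch of that deferred-release walk follows, using a hypothetical refcounted list instead of the kernel's pset and thread queues (it also inherits the original's assumption that a referenced node stays linked while the lock is dropped).

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical refcounted list node; not the kernel's queue_t. */
struct node {
	struct node *next;
	atomic_int refs;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;		/* empty list in this sketch */

static void node_ref(struct node *n)   { atomic_fetch_add(&n->refs, 1); }
static void node_unref(struct node *n) { if (atomic_fetch_sub(&n->refs, 1) == 1) free(n); }

/* Work that must run without the list lock held (like pcb_collect). */
static void collect(struct node *n) { (void)n; }

static void scan(void)
{
	struct node *n, *prev = NULL;

	pthread_mutex_lock(&list_lock);
	for (n = list_head; n != NULL; n = n->next) {
		node_ref(n);			/* keep the cursor alive while unlocked */
		pthread_mutex_unlock(&list_lock);

		collect(n);

		if (prev != NULL)
			node_unref(prev);	/* safe: we have already moved past prev */
		prev = n;

		pthread_mutex_lock(&list_lock);	/* re-lock before advancing */
	}
	pthread_mutex_unlock(&list_lock);

	if (prev != NULL)
		node_unref(prev);		/* drop the last deferred reference */
}

int main(void)
{
	scan();		/* empty list: nothing to collect */
	return 0;
}
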
2294
2295boolean_t thread_collect_allowed = TRUE((boolean_t) 1);
2296unsigned thread_collect_last_tick = 0;
2297unsigned thread_collect_max_rate = 0; /* in ticks */
2298
2299/*
2300 * consider_thread_collect:
2301 *
2302 * Called by the pageout daemon when the system needs more free pages.
2303 */
2304
2305void consider_thread_collect(void)
2306{
2307 /*
2308 * By default, don't attempt thread collection more frequently
2309 * than once a second.
2310 */
2311
2312 if (thread_collect_max_rate == 0)
2313 thread_collect_max_rate = hz;
2314
2315 if (thread_collect_allowed &&
2316 (sched_tick >
2317 (thread_collect_last_tick + thread_collect_max_rate))) {
2318 thread_collect_last_tick = sched_tick;
2319 thread_collect_scan();
2320 }
2321}
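
consider_thread_collect throttles itself against the scheduler's tick counter: a rescan is allowed only after thread_collect_max_rate ticks (defaulting to hz, roughly one second) have passed since the previous one. The same comparison in isolation, with hypothetical names:

#include <stdio.h>

static unsigned last_tick;		/* plays the role of thread_collect_last_tick */

static void work(void) { puts("collected"); }

/* Run work() at most once every max_rate ticks of a monotonic counter. */
static void maybe_collect(unsigned now, unsigned max_rate)
{
	if (now > last_tick + max_rate) {
		last_tick = now;
		work();
	}
}

int main(void)
{
	/* With max_rate = 100, only the calls at ticks 101 and 250 do any work. */
	maybe_collect(50, 100);
	maybe_collect(101, 100);
	maybe_collect(150, 100);
	maybe_collect(250, 100);
	return 0;
}
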
2322
2323#if MACH_DEBUG1
2324
2325vm_size_t stack_usage(
2326 vm_offset_t stack)
2327{
2328 int i;
2329
2330 for (i = 0; i < KERNEL_STACK_SIZE(1*4096)/sizeof(unsigned int); i++)
2331 if (((unsigned int *)stack)[i] != STACK_MARKER0xdeadbeefU)
2332 break;
2333
2334 return KERNEL_STACK_SIZE(1*4096) - i * sizeof(unsigned int);
2335}
2336
2337/*
2338 * Machine-dependent code should call stack_init
2339 * before doing its own initialization of the stack.
2340 */
2341
2342void stack_init(
2343 vm_offset_t stack)
2344{
2345 if (stack_check_usage) {
2346 int i;
2347
2348 for (i = 0; i < KERNEL_STACK_SIZE(1*4096)/sizeof(unsigned int); i++)
2349 ((unsigned int *)stack)[i] = STACK_MARKER0xdeadbeefU;
2350 }
2351}
2352
2353/*
2354 * Machine-dependent code should call stack_finalize
2355 * before releasing the stack memory.
2356 */
2357
2358void stack_finalize(
2359 vm_offset_t stack)
2360{
2361 if (stack_check_usage) {
2362 vm_size_t used = stack_usage(stack);
2363
2364 simple_lock(&stack_usage_lock);
2365 if (used > stack_max_usage)
2366 stack_max_usage = used;
2367 simple_unlock(&stack_usage_lock)((void)(&stack_usage_lock));
2368 }
2369}
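
stack_init, stack_usage and stack_finalize form a simple high-water-mark probe: a fresh kernel stack is filled with the STACK_MARKER word, and usage is later measured by scanning from the low end for the first word that no longer holds the marker, since these stacks grow downward from the top of the region. The same technique applied to a malloc'ed buffer in userland is shown below as a self-contained demonstration (the region size and the downward-growing consumer are assumptions of the example).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REGION_SIZE	4096
#define MARKER		0xdeadbeefU	/* same sentinel as STACK_MARKER */

/* Fill the region with the marker, as stack_init() does when checking is on. */
static void region_init(unsigned int *region)
{
	for (size_t i = 0; i < REGION_SIZE / sizeof(unsigned int); i++)
		region[i] = MARKER;
}

/*
 * Scan from the low end for the first overwritten word; everything from
 * there to the top of the region counts as used, as in stack_usage().
 */
static size_t region_usage(const unsigned int *region)
{
	size_t i;

	for (i = 0; i < REGION_SIZE / sizeof(unsigned int); i++)
		if (region[i] != MARKER)
			break;
	return REGION_SIZE - i * sizeof(unsigned int);
}

int main(void)
{
	unsigned int *region = malloc(REGION_SIZE);

	region_init(region);
	/* Pretend a downward-growing stack touched the top 256 bytes. */
	memset((char *)region + REGION_SIZE - 256, 0, 256);
	printf("high-water mark: %zu bytes\n", region_usage(region));	/* 256 */
	free(region);
	return 0;
}
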
2370
2371#ifndef MACHINE_STACK
2372/*
2373 * stack_statistics:
2374 *
2375 * Return statistics on cached kernel stacks.
2376 * *maxusagep must be initialized by the caller.
2377 */
2378
2379void stack_statistics(
2380 natural_t *totalp,
2381 vm_size_t *maxusagep)
2382{
2383 spl_t s;
2384
2385 s = splsched();
2386 stack_lock();
2387 if (stack_check_usage) {
2388 vm_offset_t stack;
2389
2390 /*
2391 * This is pretty expensive to do at splsched,
2392 * but it only happens when someone makes
2393 * a debugging call, so it should be OK.
2394 */
2395
2396 for (stack = stack_free_list; stack != 0;
2397 stack = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1))) {
2398 vm_size_t usage = stack_usage(stack);
2399
2400 if (usage > *maxusagep)
2401 *maxusagep = usage;
2402 }
2403 }
2404
2405 *totalp = stack_free_count;
2406 stack_unlock()((void)(&stack_lock_data));
2407 (void) splx(s);
2408}
2409#endif /* MACHINE_STACK */
2410
2411kern_return_t host_stack_usage(
2412 host_t host,
2413 vm_size_t *reservedp,
2414 unsigned int *totalp,
2415 vm_size_t *spacep,
2416 vm_size_t *residentp,
2417 vm_size_t *maxusagep,
2418 vm_offset_t *maxstackp)
2419{
2420 natural_t total;
2421 vm_size_t maxusage;
2422
2423 if (host == HOST_NULL((host_t)0))
2424 return KERN_INVALID_HOST22;
2425
2426 simple_lock(&stack_usage_lock);
2427 maxusage = stack_max_usage;
2428 simple_unlock(&stack_usage_lock)((void)(&stack_usage_lock));
2429
2430 stack_statistics(&total, &maxusage);
2431
2432 *reservedp = 0;
2433 *totalp = total;
2434 *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE)((vm_offset_t)((((vm_offset_t)((1*4096))) + ((1 << 12)-1)) & ~((1 << 12)-1)));
2435 *maxusagep = maxusage;
2436 *maxstackp = 0;
2437 return KERN_SUCCESS0;
2438}
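
host_stack_usage reports total * round_page(KERNEL_STACK_SIZE) for both the space and resident figures; as the expansion above shows, round_page rounds a size up to the next page boundary with the usual power-of-two mask. That rounding on its own, with the 4 KiB page size taken from the expansion:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096u	/* must be a power of two for the mask trick */

/* Round x up to the next multiple of PAGE_SIZE, like round_page(). */
static uintptr_t round_up_to_page(uintptr_t x)
{
	return (x + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1);
}

int main(void)
{
	assert(round_up_to_page(0) == 0);
	assert(round_up_to_page(1) == 4096);
	assert(round_up_to_page(4096) == 4096);
	assert(round_up_to_page(4097) == 8192);
	return 0;
}
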
2439
2440kern_return_t processor_set_stack_usage(
2441 processor_set_t pset,
2442 unsigned int *totalp,
2443 vm_size_t *spacep,
2444 vm_size_t *residentp,
2445 vm_size_t *maxusagep,
2446 vm_offset_t *maxstackp)
2447{
2448 unsigned int total;
2449 vm_size_t maxusage;
2450 vm_offset_t maxstack;
2451
2452 thread_t *threads;
2453 thread_t tmp_thread;
2454
2455 unsigned int actual; /* this many things */
2456 unsigned int i;
2457
2458 vm_size_t size, size_needed;
2459 vm_offset_t addr;
2460
2461 if (pset == PROCESSOR_SET_NULL((processor_set_t) 0))
2462 return KERN_INVALID_ARGUMENT4;
2463
2464 size = 0; addr = 0;
2465
2466 for (;;) {
2467 pset_lock(pset);
2468 if (!pset->active) {
2469 pset_unlock(pset)((void)(&(pset)->lock));
2470 return KERN_INVALID_ARGUMENT4;
2471 }
2472
2473 actual = pset->thread_count;
2474
2475 /* do we have the memory we need? */
2476
2477 size_needed = actual * sizeof(thread_t);
2478 if (size_needed <= size)
2479 break;
2480
2481 /* unlock the pset and allocate more memory */
2482 pset_unlock(pset)((void)(&(pset)->lock));
2483
2484 if (size != 0)
2485 kfree(addr, size);
2486
2487 assert(size_needed > 0)({ if (!(size_needed > 0)) Assert("size_needed > 0", "../kern/thread.c", 2487); });
2488 size = size_needed;
2489
2490 addr = kalloc(size);
2491 if (addr == 0)
2492 return KERN_RESOURCE_SHORTAGE6;
2493 }
2494
2495 /* OK, have memory and the processor_set is locked & active */
2496
2497 threads = (thread_t *) addr;
2498 for (i = 0, tmp_thread = (thread_t) queue_first(&pset->threads)((&pset->threads)->next);
2499 i < actual;
2500 i++,
2501 tmp_thread = (thread_t) queue_next(&tmp_thread->pset_threads)((&tmp_thread->pset_threads)->next)) {
2502 thread_reference(tmp_thread);
2503 threads[i] = tmp_thread;
2504 }
2505 assert(queue_end(&pset->threads, (queue_entry_t) tmp_thread))({ if (!(((&pset->threads) == ((queue_entry_t) tmp_thread)))) Assert("queue_end(&pset->threads, (queue_entry_t) tmp_thread)", "../kern/thread.c", 2505); });
2506
2507 /* can unlock processor set now that we have the thread refs */
2508 pset_unlock(pset)((void)(&(pset)->lock));
2509
2510 /* calculate maxusage and free thread references */
2511
2512 total = 0;
2513 maxusage = 0;
2514 maxstack = 0;
2515 for (i = 0; i < actual; i++) {
2516 thread_t thread = threads[i];
2517 vm_offset_t stack = 0;
2518
2519 /*
2520 * thread->kernel_stack is only accurate if the
2521 * thread isn't swapped and is not executing.
2522 *
2523 * Of course, we don't have the appropriate locks
2524 * for these shenanigans.
2525 */
2526
2527 if ((thread->state & TH_SWAPPED0x0100) == 0) {
2528 int cpu;
2529
2530 stack = thread->kernel_stack;
2531
2532 for (cpu = 0; cpu < NCPUS1; cpu++)
2533 if (active_threads[cpu] == thread) {
2534 stack = active_stacks[cpu];
2535 break;
2536 }
2537 }
2538
2539 if (stack != 0) {
2540 total++;
2541
2542 if (stack_check_usage) {
2543 vm_size_t usage = stack_usage(stack);
2544
2545 if (usage > maxusage) {
2546 maxusage = usage;
2547 maxstack = (vm_offset_t) thread;
2548 }
2549 }
2550 }
2551
2552 thread_deallocate(thread);
2553 }
2554
2555 if (size != 0)
2556 kfree(addr, size);
2557
2558 *totalp = total;
2559 *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE)((vm_offset_t)((((vm_offset_t)((1*4096))) + ((1 << 12)-1)) & ~((1 << 12)-1)));
2560 *maxusagep = maxusage;
2561 *maxstackp = maxstack;
2562 return KERN_SUCCESS0;
2563}
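
processor_set_stack_usage sizes its snapshot buffer with a classic retry loop: read thread_count under the pset lock, and if the buffer on hand is too small, unlock, allocate a larger one with kalloc and start over, because the count may have changed while the lock was dropped. A hedged userland sketch of the same loop, with a pthread mutex and malloc standing in for pset_lock and kalloc:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the processor set and its thread count. */
static pthread_mutex_t set_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned set_count;		/* may change whenever set_lock is dropped */

/*
 * Ensure *bufp can hold set_count pointers, re-reading the count after
 * every unlocked allocation. Returns 0 with set_lock still held, or -1
 * (lock released) if an allocation fails.
 */
static int snapshot_prepare(void ***bufp, size_t *sizep, unsigned *actual)
{
	for (;;) {
		pthread_mutex_lock(&set_lock);
		*actual = set_count;
		size_t size_needed = *actual * sizeof(void *);
		if (size_needed <= *sizep)
			return 0;	/* big enough: keep the lock and proceed */

		/* Too small: drop the lock, grow the buffer, check again. */
		pthread_mutex_unlock(&set_lock);
		free(*bufp);
		*sizep = size_needed;
		*bufp = malloc(size_needed);
		if (*bufp == NULL)
			return -1;
	}
}

int main(void)
{
	void **buf = NULL;
	size_t size = 0;
	unsigned actual;

	set_count = 8;			/* pretend the set currently has 8 threads */
	if (snapshot_prepare(&buf, &size, &actual) == 0) {
		/* ... copy out 'actual' entries here, then drop the lock ... */
		pthread_mutex_unlock(&set_lock);
	}
	free(buf);
	return 0;
}
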
2564
2565/*
2566 * Useful in the debugger:
2567 */
2568void
2569thread_stats(void)
2570{
2571 thread_t thread;
2572 int total = 0, rpcreply = 0;
2573
2574 queue_iterate(&default_pset.threads, thread, thread_t, pset_threads)for ((thread) = (thread_t) ((&default_pset.threads)->next); !(((&default_pset.threads)) == ((queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread)->pset_threads)->next)) {
2575 total++;
2576 if (thread->ith_rpc_reply != IP_NULL((ipc_port_t) ((ipc_object_t) 0)))
2577 rpcreply++;
2578 }
2579
2580 printf("%d total threads.\n", total);
2581 printf("%d using rpc_reply.\n", rpcreply);
2582}
2583#endif /* MACH_DEBUG */