Bug Summary

File: obj-scan-build/../kern/thread.c
Location: line 1655, column 2
Description: Function call argument is an uninitialized value

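The path the analyzer reports, in brief: kernel_thread() (source line 1651) declares "thread_t thread" with no initial value and calls thread_create(task, &thread) without checking its return code. thread_create() stores into *child_thread only on the KERN_SUCCESS path; every early return (TASK_NULL argument, allocation failure, or the KERN_FAILURE return taken at lines 558-564 when the parent task is no longer active) leaves the out-parameter untouched. thread_deallocate(thread) at line 1655 then passes that uninitialized local, which is the defect flagged above.

A minimal sketch of one possible guard, using only types and routines that appear in the listing below; this is illustrative, not the upstream fix, and it assumes callers can cope with a THREAD_NULL result:

	thread_t kernel_thread(
		task_t		task,
		continuation_t	start,
		void		*arg)
	{
		thread_t	thread = THREAD_NULL;	/* never read an indeterminate pointer */
		kern_return_t	kr;

		kr = thread_create(task, &thread);
		if (kr != KERN_SUCCESS)
			/* thread_create left thread unset; do not hand it
			   to thread_deallocate or thread_start. */
			return THREAD_NULL;

		/* release "extra" ref that thread_create gave us */
		thread_deallocate(thread);
		thread_start(thread, start);
		/* ... remainder as in source lines 1657-1668 of the listing ... */
	}

Whether to propagate the failure (as sketched) or to panic instead is a policy choice for the kernel; the sketch only shows how the uninitialized read would be avoided.
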
Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1994-1987 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: kern/thread.c
28 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
29 * Date: 1986
30 *
31 * Thread management primitives implementation.
32 */
33
34#include <kern/printf.h>
35#include <mach/std_types.h>
36#include <mach/policy.h>
37#include <mach/thread_info.h>
38#include <mach/thread_special_ports.h>
39#include <mach/thread_status.h>
40#include <mach/time_value.h>
41#include <machine/vm_param.h>
42#include <kern/ast.h>
43#include <kern/counters.h>
44#include <kern/debug.h>
45#include <kern/eventcount.h>
46#include <kern/ipc_mig.h>
47#include <kern/ipc_tt.h>
48#include <kern/processor.h>
49#include <kern/queue.h>
50#include <kern/sched.h>
51#include <kern/sched_prim.h>
52#include <kern/syscall_subr.h>
53#include <kern/thread.h>
54#include <kern/thread_swap.h>
55#include <kern/host.h>
56#include <kern/kalloc.h>
57#include <kern/slab.h>
58#include <kern/mach_clock.h>
59#include <vm/vm_kern.h>
60#include <ipc/ipc_kmsg.h>
61#include <ipc/ipc_port.h>
62#include <ipc/mach_msg.h>
63#include <machine/machspl.h> /* for splsched */
64#include <machine/pcb.h>
65#include <machine/thread.h> /* for MACHINE_STACK */
66
67thread_t active_threads[NCPUS1];
68vm_offset_t active_stacks[NCPUS1];
69
70struct kmem_cache thread_cache;
71
72queue_head_t reaper_queue;
73decl_simple_lock_data(, reaper_lock)
74
75extern void pcb_module_init(void);
76
77/* private */
78struct thread thread_template;
79
80#if MACH_DEBUG1
81void stack_init(vm_offset_t stack); /* forward */
82void stack_finalize(vm_offset_t stack); /* forward */
83
84#define STACK_MARKER 0xdeadbeefU
85boolean_t stack_check_usage = FALSE((boolean_t) 0);
86decl_simple_lock_data(, stack_usage_lock)
87vm_size_t stack_max_usage = 0;
88#endif /* MACH_DEBUG */
89
90/*
91 * Machine-dependent code must define:
92 * pcb_init
93 * pcb_terminate
94 * pcb_collect
95 *
96 * The thread->pcb field is reserved for machine-dependent code.
97 */
98
99#ifdef MACHINE_STACK
100/*
101 * Machine-dependent code must define:
102 * stack_alloc_try
103 * stack_alloc
104 * stack_free
105 * stack_handoff
106 * stack_collect
107 * and if MACH_DEBUG:
108 * stack_statistics
109 */
110#else /* MACHINE_STACK */
111/*
112 * We allocate stacks from generic kernel VM.
113 * Machine-dependent code must define:
114 * stack_attach
115 * stack_detach
116 * stack_handoff
117 *
118 * The stack_free_list can only be accessed at splsched,
119 * because stack_alloc_try/thread_invoke operate at splsched.
120 */
121
122decl_simple_lock_data(, stack_lock_data)/* splsched only */
123#define stack_lock() simple_lock(&stack_lock_data)
124#define stack_unlock() simple_unlock(&stack_lock_data)
125
126vm_offset_t stack_free_list; /* splsched only */
127unsigned int stack_free_count = 0; /* splsched only */
128unsigned int stack_free_limit = 1; /* patchable */
129
130unsigned int stack_alloc_hits = 0; /* debugging */
131unsigned int stack_alloc_misses = 0; /* debugging */
132unsigned int stack_alloc_max = 0; /* debugging */
133
134/*
135 * The next field is at the base of the stack,
136 * so the low end is left unsullied.
137 */
138
139#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE(1*4096)) - 1))
140
141/*
142 * stack_alloc_try:
143 *
144 * Non-blocking attempt to allocate a kernel stack.
145 * Called at splsched with the thread locked.
146 */
147
148boolean_t stack_alloc_try(
149 thread_t thread,
150 void (*resume)(thread_t))
151{
152 register vm_offset_t stack;
153
154 stack_lock();
155 stack = stack_free_list;
156 if (stack != 0) {
157 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
158 stack_free_count--;
159 } else {
160 stack = thread->stack_privilege;
161 }
162 stack_unlock();
163
164 if (stack != 0) {
165 stack_attach(thread, stack, resume);
166 stack_alloc_hits++;
167 return TRUE((boolean_t) 1);
168 } else {
169 stack_alloc_misses++;
170 return FALSE((boolean_t) 0);
171 }
172}
173
174/*
175 * stack_alloc:
176 *
177 * Allocate a kernel stack for a thread.
178 * May block.
179 */
180
181void stack_alloc(
182 thread_t thread,
183 void (*resume)(thread_t))
184{
185 vm_offset_t stack;
186 spl_t s;
187
188 /*
189 * We first try the free list. It is probably empty,
190 * or stack_alloc_try would have succeeded, but possibly
191 * a stack was freed before the swapin thread got to us.
192 */
193
194 s = splsched();
195 stack_lock();
196 stack = stack_free_list;
197 if (stack != 0) {
198 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
199 stack_free_count--;
200 }
201 stack_unlock();
202 (void) splx(s);
203
204 if (stack == 0) {
205 /*
206 * Kernel stacks should be naturally aligned,
207 * so that it is easy to find the starting/ending
208 * addresses of a stack given an address in the middle.
209 */
210
211 if (kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE(1*4096))
212 != KERN_SUCCESS0)
213 panic("stack_alloc");
214
215#if MACH_DEBUG1
216 stack_init(stack);
217#endif /* MACH_DEBUG */
218 }
219
220 stack_attach(thread, stack, resume);
221}
222
223/*
224 * stack_free:
225 *
226 * Free a thread's kernel stack.
227 * Called at splsched with the thread locked.
228 */
229
230void stack_free(
231 thread_t thread)
232{
233 register vm_offset_t stack;
234
235 stack = stack_detach(thread);
236
237 if (stack != thread->stack_privilege) {
238 stack_lock();
239 stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1)) = stack_free_list;
240 stack_free_list = stack;
241 if (++stack_free_count > stack_alloc_max)
242 stack_alloc_max = stack_free_count;
243 stack_unlock();
244 }
245}
246
247/*
248 * stack_collect:
249 *
250 * Free excess kernel stacks.
251 * May block.
252 */
253
254void stack_collect(void)
255{
256 register vm_offset_t stack;
257 spl_t s;
258
259 s = splsched();
260 stack_lock();
261 while (stack_free_count > stack_free_limit) {
262 stack = stack_free_list;
263 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
264 stack_free_count--;
265 stack_unlock();
266 (void) splx(s);
267
268#if MACH_DEBUG1
269 stack_finalize(stack);
270#endif /* MACH_DEBUG */
271 kmem_free(kmem_map, stack, KERNEL_STACK_SIZE(1*4096));
272
273 s = splsched();
274 stack_lock();
275 }
276 stack_unlock();
277 (void) splx(s);
278}
279#endif /* MACHINE_STACK */
280
281/*
282 * stack_privilege:
283 *
284 * stack_alloc_try on this thread must always succeed.
285 */
286
287void stack_privilege(
288 register thread_t thread)
289{
290 /*
291 * This implementation only works for the current thread.
292 */
293
294 if (thread != current_thread()(active_threads[(0)]))
295 panic("stack_privilege");
296
297 if (thread->stack_privilege == 0)
298 thread->stack_privilege = current_stack()(active_stacks[(0)]);
299}
300
301void thread_init(void)
302{
303 kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
304 NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
305
306 /*
307 * Fill in a template thread for fast initialization.
308 * [Fields that must be (or are typically) reset at
309 * time of creation are so noted.]
310 */
311
312 /* thread_template.links (none) */
313 thread_template.runq = RUN_QUEUE_NULL((run_queue_t) 0);
314
315 /* thread_template.task (later) */
316 /* thread_template.thread_list (later) */
317 /* thread_template.pset_threads (later) */
318
319 /* thread_template.lock (later) */
320 /* one ref for being alive; one for the guy who creates the thread */
321 thread_template.ref_count = 2;
322
323 thread_template.pcb = (pcb_t) 0; /* (reset) */
324 thread_template.kernel_stack = (vm_offset_t) 0;
325 thread_template.stack_privilege = (vm_offset_t) 0;
326
327 thread_template.wait_event = 0;
328 /* thread_template.suspend_count (later) */
329 thread_template.wait_result = KERN_SUCCESS0;
330 thread_template.wake_active = FALSE((boolean_t) 0);
331 thread_template.state = TH_SUSP0x02 | TH_SWAPPED0x0100;
332 thread_template.swap_func = thread_bootstrap_return;
333
334/* thread_template.priority (later) */
335 thread_template.max_priority = BASEPRI_USER25;
336/* thread_template.sched_pri (later - compute_priority) */
337#if MACH_FIXPRI1
338 thread_template.sched_data = 0;
339 thread_template.policy = POLICY_TIMESHARE1;
340#endif /* MACH_FIXPRI */
341 thread_template.depress_priority = -1;
342 thread_template.cpu_usage = 0;
343 thread_template.sched_usage = 0;
344 /* thread_template.sched_stamp (later) */
345
346 thread_template.recover = (vm_offset_t) 0;
347 thread_template.vm_privilege = FALSE((boolean_t) 0);
348
349 thread_template.user_stop_count = 1;
350
351 /* thread_template.<IPC structures> (later) */
352
353 timer_init(&(thread_template.user_timer));
354 timer_init(&(thread_template.system_timer));
355 thread_template.user_timer_save.low = 0;
356 thread_template.user_timer_save.high = 0;
357 thread_template.system_timer_save.low = 0;
358 thread_template.system_timer_save.high = 0;
359 thread_template.cpu_delta = 0;
360 thread_template.sched_delta = 0;
361
362 thread_template.active = FALSE((boolean_t) 0); /* reset */
363 thread_template.ast = AST_ZILCH0x0;
364
365 /* thread_template.processor_set (later) */
366 thread_template.bound_processor = PROCESSOR_NULL((processor_t) 0);
367#if MACH_HOST0
368 thread_template.may_assign = TRUE((boolean_t) 1);
369 thread_template.assign_active = FALSE((boolean_t) 0);
370#endif /* MACH_HOST */
371
372#if NCPUS1 > 1
373 /* thread_template.last_processor (later) */
374#endif /* NCPUS > 1 */
375
376 /*
377 * Initialize other data structures used in
378 * this module.
379 */
380
381 queue_init(&reaper_queue);
382 simple_lock_init(&reaper_lock);
383
384#ifndef MACHINE_STACK
385 simple_lock_init(&stack_lock_data);
386#endif /* MACHINE_STACK */
387
388#if MACH_DEBUG1
389 simple_lock_init(&stack_usage_lock);
390#endif /* MACH_DEBUG */
391
392 /*
393 * Initialize any machine-dependent
394 * per-thread structures necessary.
395 */
396
397 pcb_module_init();
398}
399
400kern_return_t thread_create(
401 register task_t parent_task,
402 thread_t *child_thread) /* OUT */
403{
404 register thread_t new_thread;
405 register processor_set_t pset;
406
407 if (parent_task == TASK_NULL((task_t) 0))
3
Assuming 'parent_task' is not equal to null
4
Taking false branch
408 return KERN_INVALID_ARGUMENT4;
409
410 /*
411 * Allocate a thread and initialize static fields
412 */
413
414 new_thread = (thread_t) kmem_cache_alloc(&thread_cache);
415
416 if (new_thread == THREAD_NULL((thread_t) 0))
5
Assuming 'new_thread' is not equal to null
6
Taking false branch
417 return KERN_RESOURCE_SHORTAGE6;
418
419 *new_thread = thread_template;
420
421 record_time_stamp (&new_thread->creation_time);
422
423 /*
424 * Initialize runtime-dependent fields
425 */
426
427 new_thread->task = parent_task;
428 simple_lock_init(&new_thread->lock);
429 new_thread->sched_stamp = sched_tick;
430 thread_timeout_setup(new_thread);
431
432 /*
433 * Create a pcb. The kernel stack is created later,
434 * when the thread is swapped-in.
435 */
436 pcb_init(new_thread);
437
438 ipc_thread_init(new_thread);
439
440 /*
441 * Find the processor set for the parent task.
442 */
443 task_lock(parent_task);
444 pset = parent_task->processor_set;
445 pset_reference(pset);
446 task_unlock(parent_task);
447
448 /*
449 * Lock both the processor set and the task,
450 * so that the thread can be added to both
451 * simultaneously. Processor set must be
452 * locked first.
453 */
454
455 Restart:
456 pset_lock(pset);
457 task_lock(parent_task);
458
459 /*
460 * If the task has changed processor sets,
461 * catch up (involves lots of lock juggling).
462 */
463 {
464 processor_set_t cur_pset;
465
466 cur_pset = parent_task->processor_set;
467 if (!cur_pset->active)
7
Taking false branch
468 cur_pset = &default_pset;
469
470 if (cur_pset != pset) {
8
Taking false branch
471 pset_reference(cur_pset);
472 task_unlock(parent_task);
473 pset_unlock(pset);
474 pset_deallocate(pset);
475 pset = cur_pset;
476 goto Restart;
477 }
478 }
479
480 /*
481 * Set the thread`s priority from the pset and task.
482 */
483
484 new_thread->priority = parent_task->priority;
485 if (pset->max_priority > new_thread->max_priority)
9
Taking false branch
486 new_thread->max_priority = pset->max_priority;
487 if (new_thread->max_priority > new_thread->priority)
10
Taking false branch
488 new_thread->priority = new_thread->max_priority;
489 /*
490 * Don't need to lock thread here because it can't
491 * possibly execute and no one else knows about it.
492 */
493 compute_priority(new_thread, TRUE((boolean_t) 1));
494
495 /*
496 * Thread is suspended if the task is. Add 1 to
497 * suspend count since thread is created in suspended
498 * state.
499 */
500 new_thread->suspend_count = parent_task->suspend_count + 1;
501
502 /*
503 * Add the thread to the processor set.
504 * If the pset is empty, suspend the thread again.
505 */
506
507 pset_add_thread(pset, new_thread);
508 if (pset->empty)
11
Taking false branch
509 new_thread->suspend_count++;
510
511#if HW_FOOTPRINT0
512 /*
513 * Need to set last_processor, idle processor would be best, but
514 * that requires extra locking nonsense. Go for tail of
515 * processors queue to avoid master.
516 */
517 if (!pset->empty) {
518 new_thread->last_processor =
519 (processor_t)queue_first(&pset->processors)((&pset->processors)->next);
520 }
521 else {
522 /*
523 * Thread created in empty processor set. Pick
524 * master processor as an acceptable legal value.
525 */
526 new_thread->last_processor = master_processor;
527 }
528#else /* HW_FOOTPRINT */
529 /*
530 * Don't need to initialize because the context switch
531 * code will set it before it can be used.
532 */
533#endif /* HW_FOOTPRINT */
534
535#if MACH_PCSAMPLE1
536 new_thread->pc_sample.seqno = 0;
537 new_thread->pc_sample.sampletypes = 0;
538#endif /* MACH_PCSAMPLE */
539
540 new_thread->pc_sample.buffer = 0;
541 /*
542 * Add the thread to the task`s list of threads.
543 * The new thread holds another reference to the task.
544 */
545
546 parent_task->ref_count++;
547
548 parent_task->thread_count++;
549 queue_enter(&parent_task->thread_list, new_thread, thread_t,
12
Within the expansion of the macro 'queue_enter':
550 thread_list);
551
552 /*
553 * Finally, mark the thread active.
554 */
555
556 new_thread->active = TRUE((boolean_t) 1);
557
558 if (!parent_task->active) {
13
Taking true branch
559 task_unlock(parent_task);
560 pset_unlock(pset);
561 (void) thread_terminate(new_thread);
14
Calling 'thread_terminate'
20
Returning from 'thread_terminate'
562 /* release ref we would have given our caller */
563 thread_deallocate(new_thread);
21
Calling 'thread_deallocate'
24
Returning from 'thread_deallocate'
564 return KERN_FAILURE5;
565 }
566 task_unlock(parent_task);
567 pset_unlock(pset);
568
569 ipc_thread_enable(new_thread);
570
571 *child_thread = new_thread;
572 return KERN_SUCCESS0;
573}
574
575unsigned int thread_deallocate_stack = 0;
576
577void thread_deallocate(
578 register thread_t thread)
579{
580 spl_t s;
581 register task_t task;
582 register processor_set_t pset;
583
584 time_value_t user_time, system_time;
585
586 if (thread == THREAD_NULL((thread_t) 0))
22
Taking false branch
587 return;
588
589 /*
590 * First, check for new count > 0 (the common case).
591 * Only the thread needs to be locked.
592 */
593 s = splsched();
594 thread_lock(thread);
595 if (--thread->ref_count > 0) {
23
Taking true branch
596 thread_unlock(thread);
597 (void) splx(s);
598 return;
599 }
600
601 /*
602 * Count is zero. However, the task's and processor set's
603 * thread lists have implicit references to
604 * the thread, and may make new ones. Their locks also
605 * dominate the thread lock. To check for this, we
606 * temporarily restore the one thread reference, unlock
607 * the thread, and then lock the other structures in
608 * the proper order.
609 */
610 thread->ref_count = 1;
611 thread_unlock(thread);
612 (void) splx(s);
613
614 pset = thread->processor_set;
615 pset_lock(pset);
616
617#if MACH_HOST0
618 /*
619 * The thread might have moved.
620 */
621 while (pset != thread->processor_set) {
622 pset_unlock(pset);
623 pset = thread->processor_set;
624 pset_lock(pset);
625 }
626#endif /* MACH_HOST */
627
628 task = thread->task;
629 task_lock(task);
630
631 s = splsched();
632 thread_lock(thread);
633
634 if (--thread->ref_count > 0) {
635 /*
636 * Task or processor_set made extra reference.
637 */
638 thread_unlock(thread);
639 (void) splx(s);
640 task_unlock(task);
641 pset_unlock(pset);
642 return;
643 }
644
645 /*
646 * Thread has no references - we can remove it.
647 */
648
649 /*
650 * Remove pending timeouts.
651 */
652 reset_timeout_check(&thread->timer);
653
654 reset_timeout_check(&thread->depress_timer);
655 thread->depress_priority = -1;
656
657 /*
658 * Accumulate times for dead threads in task.
659 */
660 thread_read_times(thread, &user_time, &system_time);
661 time_value_add(&task->total_user_time, &user_time);
662 time_value_add(&task->total_system_time, &system_time);
663
664 /*
665 * Remove thread from task list and processor_set threads list.
666 */
667 task->thread_count--;
668 queue_remove(&task->thread_list, thread, thread_t, thread_list);
669
670 pset_remove_thread(pset, thread);
671
672 thread_unlock(thread); /* no more references - safe */
673 (void) splx(s);
674 task_unlock(task);
675 pset_unlock(pset);
676 pset_deallocate(pset);
677
678 /*
679 * A couple of quick sanity checks
680 */
681
682 if (thread == current_thread()(active_threads[(0)])) {
683 panic("thread deallocating itself");
684 }
685 if ((thread->state & ~(TH_RUN0x04 | TH_HALTED0x10 | TH_SWAPPED0x0100)) != TH_SUSP0x02)
686 panic("unstopped thread destroyed!");
687
688 /*
689 * Deallocate the task reference, since we know the thread
690 * is not running.
691 */
692 task_deallocate(thread->task); /* may block */
693
694 /*
695 * Clean up any machine-dependent resources.
696 */
697 if ((thread->state & TH_SWAPPED0x0100) == 0) {
698 splsched();
699 stack_free(thread);
700 (void) splx(s);
701 thread_deallocate_stack++;
702 }
703 /*
704 * Rattle the event count machinery (gag)
705 */
706 evc_notify_abort(thread);
707
708 pcb_terminate(thread);
709 kmem_cache_free(&thread_cache, (vm_offset_t) thread);
710}
711
712void thread_reference(
713 register thread_t thread)
714{
715 spl_t s;
716
717 if (thread == THREAD_NULL((thread_t) 0))
718 return;
719
720 s = splsched();
721 thread_lock(thread);
722 thread->ref_count++;
723 thread_unlock(thread);
724 (void) splx(s);
725}
726
727/*
728 * thread_terminate:
729 *
730 * Permanently stop execution of the specified thread.
731 *
732 * A thread to be terminated must be allowed to clean up any state
733 * that it has before it exits. The thread is broken out of any
734 * wait condition that it is in, and signalled to exit. It then
735 * cleans up its state and calls thread_halt_self on its way out of
736 * the kernel. The caller waits for the thread to halt, terminates
737 * its IPC state, and then deallocates it.
738 *
739 * If the caller is the current thread, it must still exit the kernel
740 * to clean up any state (thread and port references, messages, etc).
741 * When it exits the kernel, it then terminates its IPC state and
742 * queues itself for the reaper thread, which will wait for the thread
743 * to stop and then deallocate it. (A thread cannot deallocate itself,
744 * since it needs a kernel stack to execute.)
745 */
746kern_return_t thread_terminate(
747 register thread_t thread)
748{
749 register thread_t cur_thread = current_thread()(active_threads[(0)]);
750 register task_t cur_task;
751 spl_t s;
752
753 if (thread == THREAD_NULL((thread_t) 0))
15
Taking false branch
754 return KERN_INVALID_ARGUMENT4;
755
756 /*
757 * Break IPC control over the thread.
758 */
759 ipc_thread_disable(thread);
760
761 if (thread == cur_thread) {
16
Taking false branch
762
763 /*
764 * Current thread will queue itself for reaper when
765 * exiting kernel.
766 */
767 s = splsched();
768 thread_lock(thread);
769 if (thread->active) {
770 thread->active = FALSE((boolean_t) 0);
771 thread_ast_set(thread, AST_TERMINATE)(thread)->ast |= (0x2);
772 }
773 thread_unlock(thread);
774 ast_on(cpu_number(), AST_TERMINATE)({ if ((need_ast[(0)] |= (0x2)) != 0x0) { ; } });
775 splx(s);
776 return KERN_SUCCESS0;
777 }
778
779 /*
780 * Lock both threads and the current task
781 * to check termination races and prevent deadlocks.
782 */
783 cur_task = current_task()((active_threads[(0)])->task);
784 task_lock(cur_task);
785 s = splsched();
786 if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
17
Taking false branch
787 thread_lock(thread);
788 thread_lock(cur_thread);
789 }
790 else {
791 thread_lock(cur_thread);
792 thread_lock(thread);
793 }
794
795 /*
796 * If the current thread is being terminated, help out.
797 */
798 if ((!cur_task->active) || (!cur_thread->active)) {
18
Taking false branch
799 thread_unlock(cur_thread);
800 thread_unlock(thread);
801 (void) splx(s);
802 task_unlock(cur_task);
803 thread_terminate(cur_thread);
804 return KERN_FAILURE5;
805 }
806
807 thread_unlock(cur_thread);
808 task_unlock(cur_task);
809
810 /*
811 * Terminate victim thread.
812 */
813 if (!thread->active) {
19
Taking true branch
814 /*
815 * Someone else got there first.
816 */
817 thread_unlock(thread);
818 (void) splx(s);
819 return KERN_FAILURE5;
820 }
821
822 thread->active = FALSE((boolean_t) 0);
823
824 thread_unlock(thread);
825 (void) splx(s);
826
827#if MACH_HOST0
828 /*
829 * Reassign thread to default pset if needed.
830 */
831 thread_freeze(thread);
832 if (thread->processor_set != &default_pset) {
833 thread_doassign(thread, &default_pset, FALSE((boolean_t) 0));
834 }
835#endif /* MACH_HOST */
836
837 /*
838 * Halt the victim at the clean point.
839 */
840 (void) thread_halt(thread, TRUE((boolean_t) 1));
841#if MACH_HOST0
842 thread_unfreeze(thread);
843#endif /* MACH_HOST */
844 /*
845 * Shut down the victims IPC and deallocate its
846 * reference to itself.
847 */
848 ipc_thread_terminate(thread);
849 thread_deallocate(thread);
850 return KERN_SUCCESS0;
851}
852
853/*
854 * thread_force_terminate:
855 *
856 * Version of thread_terminate called by task_terminate. thread is
857 * not the current thread. task_terminate is the dominant operation,
858 * so we can force this thread to stop.
859 */
860void
861thread_force_terminate(
862 register thread_t thread)
863{
864 boolean_t deallocate_here;
865 spl_t s;
866
867 ipc_thread_disable(thread);
868
869#if MACH_HOST0
870 /*
871 * Reassign thread to default pset if needed.
872 */
873 thread_freeze(thread);
874 if (thread->processor_set != &default_pset)
875 thread_doassign(thread, &default_pset, FALSE((boolean_t) 0));
876#endif /* MACH_HOST */
877
878 s = splsched();
879 thread_lock(thread);
880 deallocate_here = thread->active;
881 thread->active = FALSE((boolean_t) 0);
882 thread_unlock(thread);
883 (void) splx(s);
884
885 (void) thread_halt(thread, TRUE((boolean_t) 1));
886 ipc_thread_terminate(thread);
887
888#if MACH_HOST0
889 thread_unfreeze(thread);
890#endif /* MACH_HOST */
891
892 if (deallocate_here)
893 thread_deallocate(thread);
894}
895
896
897/*
898 * Halt a thread at a clean point, leaving it suspended.
899 *
900 * must_halt indicates whether thread must halt.
901 *
902 */
903kern_return_t thread_halt(
904 register thread_t thread,
905 boolean_t must_halt)
906{
907 register thread_t cur_thread = current_thread()(active_threads[(0)]);
908 register kern_return_t ret;
909 spl_t s;
910
911 if (thread == cur_thread)
912 panic("thread_halt: trying to halt current thread.");
913 /*
914 * If must_halt is FALSE, then a check must be made for
915 * a cycle of halt operations.
916 */
917 if (!must_halt) {
918 /*
919 * Grab both thread locks.
920 */
921 s = splsched();
922 if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
923 thread_lock(thread);
924 thread_lock(cur_thread);
925 }
926 else {
927 thread_lock(cur_thread);
928 thread_lock(thread);
929 }
930
931 /*
932 * If target thread is already halted, grab a hold
933 * on it and return.
934 */
935 if (thread->state & TH_HALTED0x10) {
936 thread->suspend_count++;
937 thread_unlock(cur_thread);
938 thread_unlock(thread);
939 (void) splx(s);
940 return KERN_SUCCESS0;
941 }
942
943 /*
944 * If someone is trying to halt us, we have a potential
945 * halt cycle. Break the cycle by interrupting anyone
946 * who is trying to halt us, and causing this operation
947 * to fail; retry logic will only retry operations
948 * that cannot deadlock. (If must_halt is TRUE, this
949 * operation can never cause a deadlock.)
950 */
951 if (cur_thread->ast & AST_HALT0x1) {
952 thread_wakeup_with_result((event_t)&cur_thread->wake_active,
953 THREAD_INTERRUPTED);
954 thread_unlock(thread);
955 thread_unlock(cur_thread);
956 (void) splx(s);
957 return KERN_FAILURE5;
958 }
959
960 thread_unlock(cur_thread);
961
962 }
963 else {
964 /*
965 * Lock thread and check whether it is already halted.
966 */
967 s = splsched();
968 thread_lock(thread);
969 if (thread->state & TH_HALTED0x10) {
970 thread->suspend_count++;
971 thread_unlock(thread);
972 (void) splx(s);
973 return KERN_SUCCESS0;
974 }
975 }
976
977 /*
978 * Suspend thread - inline version of thread_hold() because
979 * thread is already locked.
980 */
981 thread->suspend_count++;
982 thread->state |= TH_SUSP0x02;
983
984 /*
985 * If someone else is halting it, wait for that to complete.
986 * Fail if wait interrupted and must_halt is false.
987 */
988 while ((thread->ast & AST_HALT0x1) && (!(thread->state & TH_HALTED0x10))) {
989 thread->wake_active = TRUE((boolean_t) 1);
990 thread_sleep((event_t) &thread->wake_active,
991 simple_lock_addr(thread->lock)((simple_lock_t)0), TRUE((boolean_t) 1));
992
993 if (thread->state & TH_HALTED0x10) {
994 (void) splx(s);
995 return KERN_SUCCESS0;
996 }
997 if ((current_thread()(active_threads[(0)])->wait_result != THREAD_AWAKENED0)
998 && !(must_halt)) {
999 (void) splx(s);
1000 thread_release(thread);
1001 return KERN_FAILURE5;
1002 }
1003 thread_lock(thread);
1004 }
1005
1006 /*
1007 * Otherwise, have to do it ourselves.
1008 */
1009
1010 thread_ast_set(thread, AST_HALT)(thread)->ast |= (0x1);
1011
1012 while (TRUE((boolean_t) 1)) {
1013 /*
1014 * Wait for thread to stop.
1015 */
1016 thread_unlock(thread);
1017 (void) splx(s);
1018
1019 ret = thread_dowait(thread, must_halt);
1020
1021 /*
1022 * If the dowait failed, so do we. Drop AST_HALT, and
1023 * wake up anyone else who might be waiting for it.
1024 */
1025 if (ret != KERN_SUCCESS0) {
1026 s = splsched();
1027 thread_lock(thread);
1028 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1029 thread_wakeup_with_result((event_t)&thread->wake_active,
1030 THREAD_INTERRUPTED);
1031 thread_unlock(thread);
1032 (void) splx(s);
1033
1034 thread_release(thread);
1035 return ret;
1036 }
1037
1038 /*
1039 * Clear any interruptible wait.
1040 */
1041 clear_wait(thread, THREAD_INTERRUPTED2, TRUE((boolean_t) 1));
1042
1043 /*
1044 * If the thread's at a clean point, we're done.
1045 * Don't need a lock because it really is stopped.
1046 */
1047 if (thread->state & TH_HALTED0x10) {
1048 return KERN_SUCCESS0;
1049 }
1050
1051 /*
1052 * If the thread is at a nice continuation,
1053 * or a continuation with a cleanup routine,
1054 * call the cleanup routine.
1055 */
1056 if ((((thread->swap_func == mach_msg_continue) ||
1057 (thread->swap_func == mach_msg_receive_continue)) &&
1058 mach_msg_interrupt(thread)) ||
1059 (thread->swap_func == thread_exception_return) ||
1060 (thread->swap_func == thread_bootstrap_return)) {
1061 s = splsched();
1062 thread_lock(thread);
1063 thread->state |= TH_HALTED0x10;
1064 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1065 thread_unlock(thread);
1066 splx(s);
1067
1068 return KERN_SUCCESS0;
1069 }
1070
1071 /*
1072 * Force the thread to stop at a clean
1073 * point, and arrange to wait for it.
1074 *
1075 * Set it running, so it can notice. Override
1076 * the suspend count. We know that the thread
1077 * is suspended and not waiting.
1078 *
1079 * Since the thread may hit an interruptible wait
1080 * before it reaches a clean point, we must force it
1081 * to wake us up when it does so. This involves some
1082 * trickery:
1083 * We mark the thread SUSPENDED so that thread_block
1084 * will suspend it and wake us up.
1085 * We mark the thread RUNNING so that it will run.
1086 * We mark the thread UN-INTERRUPTIBLE (!) so that
1087 * some other thread trying to halt or suspend it won't
1088 * take it off the run queue before it runs. Since
1089 * dispatching a thread (the tail of thread_invoke) marks
1090 * the thread interruptible, it will stop at the next
1091 * context switch or interruptible wait.
1092 */
1093
1094 s = splsched();
1095 thread_lock(thread);
1096 if ((thread->state & TH_SCHED_STATE(0x01|0x02|0x04|0x08)) != TH_SUSP0x02)
1097 panic("thread_halt");
1098 thread->state |= TH_RUN0x04 | TH_UNINT0x08;
1099 thread_setrun(thread, FALSE((boolean_t) 0));
1100
1101 /*
1102 * Continue loop and wait for thread to stop.
1103 */
1104 }
1105}
1106
1107void walking_zombie(void)
1108{
1109 panic("the zombie walks!");
1110}
1111
1112/*
1113 * Thread calls this routine on exit from the kernel when it
1114 * notices a halt request.
1115 */
1116void thread_halt_self(void)
1117{
1118 register thread_t thread = current_thread()(active_threads[(0)]);
1119 spl_t s;
1120
1121 if (thread->ast & AST_TERMINATE0x2) {
1122 /*
1123 * Thread is terminating itself. Shut
1124 * down IPC, then queue it up for the
1125 * reaper thread.
1126 */
1127 ipc_thread_terminate(thread);
1128
1129 thread_hold(thread);
1130
1131 s = splsched();
1132 simple_lock(&reaper_lock);
1133 enqueue_tail(&reaper_queue, (queue_entry_t) thread);
1134 simple_unlock(&reaper_lock);
1135
1136 thread_lock(thread);
1137 thread->state |= TH_HALTED0x10;
1138 thread_unlock(thread);
1139 (void) splx(s);
1140
1141 thread_wakeup((event_t)&reaper_queue);
1142 counter(c_thread_halt_self_block++);
1143 thread_block(walking_zombie);
1144 /*NOTREACHED*/
1145 } else {
1146 /*
1147 * Thread was asked to halt - show that it
1148 * has done so.
1149 */
1150 s = splsched();
1151 thread_lock(thread);
1152 thread->state |= TH_HALTED0x10;
1153 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1154 thread_unlock(thread);
1155 splx(s);
1156 counter(c_thread_halt_self_block++);
1157 thread_block(thread_exception_return);
1158 /*
1159 * thread_release resets TH_HALTED.
1160 */
1161 }
1162}
1163
1164/*
1165 * thread_hold:
1166 *
1167 * Suspend execution of the specified thread.
1168 * This is a recursive-style suspension of the thread, a count of
1169 * suspends is maintained.
1170 */
1171void thread_hold(
1172 register thread_t thread)
1173{
1174 spl_t s;
1175
1176 s = splsched();
1177 thread_lock(thread);
1178 thread->suspend_count++;
1179 thread->state |= TH_SUSP0x02;
1180 thread_unlock(thread);
1181 (void) splx(s);
1182}
1183
1184/*
1185 * thread_dowait:
1186 *
1187 * Wait for a thread to actually enter stopped state.
1188 *
1189 * must_halt argument indicates if this may fail on interruption.
1190 * This is FALSE only if called from thread_abort via thread_halt.
1191 */
1192kern_return_t
1193thread_dowait(
1194 register thread_t thread,
1195 boolean_t must_halt)
1196{
1197 register boolean_t need_wakeup;
1198 register kern_return_t ret = KERN_SUCCESS0;
1199 spl_t s;
1200
1201 if (thread == current_thread()(active_threads[(0)]))
1202 panic("thread_dowait");
1203
1204 /*
1205 * If a thread is not interruptible, it may not be suspended
1206 * until it becomes interruptible. In this case, we wait for
1207 * the thread to stop itself, and indicate that we are waiting
1208 * for it to stop so that it can wake us up when it does stop.
1209 *
1210 * If the thread is interruptible, we may be able to suspend
1211 * it immediately. There are several cases:
1212 *
1213 * 1) The thread is already stopped (trivial)
1214 * 2) The thread is runnable (marked RUN and on a run queue).
1215 * We pull it off the run queue and mark it stopped.
1216 * 3) The thread is running. We wait for it to stop.
1217 */
1218
1219 need_wakeup = FALSE((boolean_t) 0);
1220 s = splsched();
1221 thread_lock(thread);
1222
1223 for (;;) {
1224 switch (thread->state & TH_SCHED_STATE(0x01|0x02|0x04|0x08)) {
1225 case TH_SUSP0x02:
1226 case TH_WAIT0x01 | TH_SUSP0x02:
1227 /*
1228 * Thread is already suspended, or sleeping in an
1229 * interruptible wait. We win!
1230 */
1231 break;
1232
1233 case TH_RUN0x04 | TH_SUSP0x02:
1234 /*
1235 * The thread is interruptible. If we can pull
1236 * it off a runq, stop it here.
1237 */
1238 if (rem_runq(thread) != RUN_QUEUE_NULL((run_queue_t) 0)) {
1239 thread->state &= ~TH_RUN0x04;
1240 need_wakeup = thread->wake_active;
1241 thread->wake_active = FALSE((boolean_t) 0);
1242 break;
1243 }
1244#if NCPUS1 > 1
1245 /*
1246 * The thread must be running, so make its
1247 * processor execute ast_check(). This
1248 * should cause the thread to take an ast and
1249 * context switch to suspend for us.
1250 */
1251 cause_ast_check(thread->last_processor);
1252#endif /* NCPUS > 1 */
1253
1254 /*
1255 * Fall through to wait for thread to stop.
1256 */
1257
1258 case TH_RUN0x04 | TH_SUSP0x02 | TH_UNINT0x08:
1259 case TH_RUN0x04 | TH_WAIT0x01 | TH_SUSP0x02:
1260 case TH_RUN0x04 | TH_WAIT0x01 | TH_SUSP0x02 | TH_UNINT0x08:
1261 case TH_WAIT0x01 | TH_SUSP0x02 | TH_UNINT0x08:
1262 /*
1263 * Wait for the thread to stop, or sleep interruptibly
1264 * (thread_block will stop it in the latter case).
1265 * Check for failure if interrupted.
1266 */
1267 thread->wake_active = TRUE((boolean_t) 1);
1268 thread_sleep((event_t) &thread->wake_active,
1269 simple_lock_addr(thread->lock)((simple_lock_t)0), TRUE((boolean_t) 1));
1270 thread_lock(thread);
1271 if ((current_thread()(active_threads[(0)])->wait_result != THREAD_AWAKENED0) &&
1272 !must_halt) {
1273 ret = KERN_FAILURE5;
1274 break;
1275 }
1276
1277 /*
1278 * Repeat loop to check thread`s state.
1279 */
1280 continue;
1281 }
1282 /*
1283 * Thread is stopped at this point.
1284 */
1285 break;
1286 }
1287
1288 thread_unlock(thread);
1289 (void) splx(s);
1290
1291 if (need_wakeup)
1292 thread_wakeup((event_t) &thread->wake_active);
1293
1294 return ret;
1295}
1296
1297void thread_release(
1298 register thread_t thread)
1299{
1300 spl_t s;
1301
1302 s = splsched();
1303 thread_lock(thread);
1304 if (--thread->suspend_count == 0) {
1305 thread->state &= ~(TH_SUSP0x02 | TH_HALTED0x10);
1306 if ((thread->state & (TH_WAIT0x01 | TH_RUN0x04)) == 0) {
1307 /* was only suspended */
1308 thread->state |= TH_RUN0x04;
1309 thread_setrun(thread, TRUE((boolean_t) 1));
1310 }
1311 }
1312 thread_unlock(thread);
1313 (void) splx(s);
1314}
1315
1316kern_return_t thread_suspend(
1317 register thread_t thread)
1318{
1319 register boolean_t hold;
1320 spl_t spl;
1321
1322 if (thread == THREAD_NULL((thread_t) 0))
1323 return KERN_INVALID_ARGUMENT4;
1324
1325 hold = FALSE((boolean_t) 0);
1326 spl = splsched();
1327 thread_lock(thread);
1328 /* Wait for thread to get interruptible */
1329 while (thread->state & TH_UNINT0x08) {
1330 assert_wait(&thread->state, TRUE((boolean_t) 1));
1331 thread_unlock(thread);
1332 thread_block(NULL((void *) 0));
1333 thread_lock(thread);
1334 }
1335 if (thread->user_stop_count++ == 0) {
1336 hold = TRUE((boolean_t) 1);
1337 thread->suspend_count++;
1338 thread->state |= TH_SUSP0x02;
1339 }
1340 thread_unlock(thread);
1341 (void) splx(spl);
1342
1343 /*
1344 * Now wait for the thread if necessary.
1345 */
1346 if (hold) {
1347 if (thread == current_thread()(active_threads[(0)])) {
1348 /*
1349 * We want to call thread_block on our way out,
1350 * to stop running.
1351 */
1352 spl = splsched();
1353 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
1354 (void) splx(spl);
1355 } else
1356 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1357 }
1358 return KERN_SUCCESS0;
1359}
1360
1361
1362kern_return_t thread_resume(
1363 register thread_t thread)
1364{
1365 register kern_return_t ret;
1366 spl_t s;
1367
1368 if (thread == THREAD_NULL((thread_t) 0))
1369 return KERN_INVALID_ARGUMENT4;
1370
1371 ret = KERN_SUCCESS0;
1372
1373 s = splsched();
1374 thread_lock(thread);
1375 if (thread->user_stop_count > 0) {
1376 if (--thread->user_stop_count == 0) {
1377 if (--thread->suspend_count == 0) {
1378 thread->state &= ~(TH_SUSP0x02 | TH_HALTED0x10);
1379 if ((thread->state & (TH_WAIT0x01 | TH_RUN0x04)) == 0) {
1380 /* was only suspended */
1381 thread->state |= TH_RUN0x04;
1382 thread_setrun(thread, TRUE((boolean_t) 1));
1383 }
1384 }
1385 }
1386 }
1387 else {
1388 ret = KERN_FAILURE5;
1389 }
1390
1391 thread_unlock(thread);
1392 (void) splx(s);
1393
1394 return ret;
1395}
1396
1397/*
1398 * Return thread's machine-dependent state.
1399 */
1400kern_return_t thread_get_state(
1401 register thread_t thread,
1402 int flavor,
1403 thread_state_t old_state, /* pointer to OUT array */
1404 natural_t *old_state_count) /*IN/OUT*/
1405{
1406 kern_return_t ret;
1407
1408 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1409 return KERN_INVALID_ARGUMENT4;
1410 }
1411
1412 thread_hold(thread);
1413 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1414
1415 ret = thread_getstatus(thread, flavor, old_state, old_state_count);
1416
1417 thread_release(thread);
1418 return ret;
1419}
1420
1421/*
1422 * Change thread's machine-dependent state.
1423 */
1424kern_return_t thread_set_state(
1425 register thread_t thread,
1426 int flavor,
1427 thread_state_t new_state,
1428 natural_t new_state_count)
1429{
1430 kern_return_t ret;
1431
1432 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1433 return KERN_INVALID_ARGUMENT4;
1434 }
1435
1436 thread_hold(thread);
1437 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1438
1439 ret = thread_setstatus(thread, flavor, new_state, new_state_count);
1440
1441 thread_release(thread);
1442 return ret;
1443}
1444
1445kern_return_t thread_info(
1446 register thread_t thread,
1447 int flavor,
1448 thread_info_t thread_info_out, /* pointer to OUT array */
1449 natural_t *thread_info_count) /*IN/OUT*/
1450{
1451 int state, flags;
1452 spl_t s;
1453
1454 if (thread == THREAD_NULL((thread_t) 0))
1455 return KERN_INVALID_ARGUMENT4;
1456
1457 if (flavor == THREAD_BASIC_INFO1) {
1458 register thread_basic_info_t basic_info;
1459
1460 /* Allow *thread_info_count to be one smaller than the
1461 usual amount, because creation_time is a new member
1462 that some callers might not know about. */
1463
1464 if (*thread_info_count < THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t)) - 1) {
1465 return KERN_INVALID_ARGUMENT4;
1466 }
1467
1468 basic_info = (thread_basic_info_t) thread_info_out;
1469
1470 s = splsched();
1471 thread_lock(thread);
1472
1473 /*
1474 * Update lazy-evaluated scheduler info because someone wants it.
1475 */
1476 if ((thread->state & TH_RUN0x04) == 0 &&
1477 thread->sched_stamp != sched_tick)
1478 update_priority(thread);
1479
1480 /* fill in info */
1481
1482 thread_read_times(thread,
1483 &basic_info->user_time,
1484 &basic_info->system_time);
1485 basic_info->base_priority = thread->priority;
1486 basic_info->cur_priority = thread->sched_pri;
1487 basic_info->creation_time = thread->creation_time;
1488
1489 /*
1490 * To calculate cpu_usage, first correct for timer rate,
1491 * then for 5/8 ageing. The correction factor [3/5] is
1492 * (1/(5/8) - 1).
1493 */
1494 basic_info->cpu_usage = thread->cpu_usage /
1495 (TIMER_RATE1000000/TH_USAGE_SCALE1000);
1496 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1497#if SIMPLE_CLOCK0
1498 /*
1499 * Clock drift compensation.
1500 */
1501 basic_info->cpu_usage =
1502 (basic_info->cpu_usage * 1000000)/sched_usec;
1503#endif /* SIMPLE_CLOCK */
1504
1505 flags = 0;
1506 if (thread->state & TH_SWAPPED0x0100)
1507 flags |= TH_FLAGS_SWAPPED0x1;
1508 if (thread->state & TH_IDLE0x80)
1509 flags |= TH_FLAGS_IDLE0x2;
1510
1511 if (thread->state & TH_HALTED0x10)
1512 state = TH_STATE_HALTED5;
1513 else
1514 if (thread->state & TH_RUN0x04)
1515 state = TH_STATE_RUNNING1;
1516 else
1517 if (thread->state & TH_UNINT0x08)
1518 state = TH_STATE_UNINTERRUPTIBLE4;
1519 else
1520 if (thread->state & TH_SUSP0x02)
1521 state = TH_STATE_STOPPED2;
1522 else
1523 if (thread->state & TH_WAIT0x01)
1524 state = TH_STATE_WAITING3;
1525 else
1526 state = 0; /* ? */
1527
1528 basic_info->run_state = state;
1529 basic_info->flags = flags;
1530 basic_info->suspend_count = thread->user_stop_count;
1531 if (state == TH_STATE_RUNNING1)
1532 basic_info->sleep_time = 0;
1533 else
1534 basic_info->sleep_time = sched_tick - thread->sched_stamp;
1535
1536 thread_unlock(thread);
1537 splx(s);
1538
1539 if (*thread_info_count > THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t)))
1540 *thread_info_count = THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t));
1541 return KERN_SUCCESS0;
1542 }
1543 else if (flavor == THREAD_SCHED_INFO2) {
1544 register thread_sched_info_t sched_info;
1545
1546 if (*thread_info_count < THREAD_SCHED_INFO_COUNT(sizeof(thread_sched_info_data_t) / sizeof(natural_t))) {
1547 return KERN_INVALID_ARGUMENT4;
1548 }
1549
1550 sched_info = (thread_sched_info_t) thread_info_out;
1551
1552 s = splsched();
1553 thread_lock(thread);
1554
1555#if MACH_FIXPRI1
1556 sched_info->policy = thread->policy;
1557 if (thread->policy == POLICY_FIXEDPRI2) {
1558 sched_info->data = (thread->sched_data * tick)/1000;
1559 }
1560 else {
1561 sched_info->data = 0;
1562 }
1563#else /* MACH_FIXPRI */
1564 sched_info->policy = POLICY_TIMESHARE1;
1565 sched_info->data = 0;
1566#endif /* MACH_FIXPRI */
1567
1568 sched_info->base_priority = thread->priority;
1569 sched_info->max_priority = thread->max_priority;
1570 sched_info->cur_priority = thread->sched_pri;
1571
1572 sched_info->depressed = (thread->depress_priority >= 0);
1573 sched_info->depress_priority = thread->depress_priority;
1574
1575 thread_unlock(thread);
1576 splx(s);
1577
1578 *thread_info_count = THREAD_SCHED_INFO_COUNT(sizeof(thread_sched_info_data_t) / sizeof(natural_t));
1579 return KERN_SUCCESS0;
1580 }
1581
1582 return KERN_INVALID_ARGUMENT4;
1583}
1584
1585kern_return_t thread_abort(
1586 register thread_t thread)
1587{
1588 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1589 return KERN_INVALID_ARGUMENT4;
1590 }
1591
1592 /*
1593 *
1594 * clear it of an event wait
1595 */
1596 evc_notify_abort(thread);
1597
1598 /*
1599 * Try to force the thread to a clean point
1600 * If the halt operation fails return KERN_ABORTED.
1601 * ipc code will convert this to an ipc interrupted error code.
1602 */
1603 if (thread_halt(thread, FALSE((boolean_t) 0)) != KERN_SUCCESS0)
1604 return KERN_ABORTED14;
1605
1606 /*
1607 * If the thread was in an exception, abort that too.
1608 */
1609 mach_msg_abort_rpc(thread);
1610
1611 /*
1612 * Then set it going again.
1613 */
1614 thread_release(thread);
1615
1616 /*
1617 * Also abort any depression.
1618 */
1619 if (thread->depress_priority != -1)
1620 thread_depress_abort(thread);
1621
1622 return KERN_SUCCESS0;
1623}
1624
1625/*
1626 * thread_start:
1627 *
1628 * Start a thread at the specified routine.
1629 * The thread must be in a swapped state.
1630 */
1631
1632void
1633thread_start(
1634 thread_t thread,
1635 continuation_t start)
1636{
1637 thread->swap_func = start;
1638}
1639
1640/*
1641 * kernel_thread:
1642 *
1643 * Start up a kernel thread in the specified task.
1644 */
1645
1646thread_t kernel_thread(
1647 task_t task,
1648 continuation_t start,
1649 void * arg)
1650{
1651 thread_t thread;
1
Variable 'thread' declared without an initial value
1652
1653 (void) thread_create(task, &thread);
2
Calling 'thread_create'
25
Returning from 'thread_create'
1654 /* release "extra" ref that thread_create gave us */
1655 thread_deallocate(thread);
26
Function call argument is an uninitialized value
1656 thread_start(thread, start);
1657 thread->ith_othersaved.other = arg;
1658
1659 /*
1660 * We ensure that the kernel thread starts with a stack.
1661 * The swapin mechanism might not be operational yet.
1662 */
1663 thread_doswapin(thread);
1664 thread->max_priority = BASEPRI_SYSTEM6;
1665 thread->priority = BASEPRI_SYSTEM6;
1666 thread->sched_pri = BASEPRI_SYSTEM6;
1667 (void) thread_resume(thread);
1668 return thread;
1669}
1670
1671/*
1672 * reaper_thread:
1673 *
1674 * This kernel thread runs forever looking for threads to destroy
1675 * (when they request that they be destroyed, of course).
1676 */
1677void reaper_thread_continue(void)
1678{
1679 for (;;) {
1680 register thread_t thread;
1681 spl_t s;
1682
1683 s = splsched();
1684 simple_lock(&reaper_lock);
1685
1686 while ((thread = (thread_t) dequeue_head(&reaper_queue))
1687 != THREAD_NULL((thread_t) 0)) {
1688 simple_unlock(&reaper_lock);
1689 (void) splx(s);
1690
1691 (void) thread_dowait(thread, TRUE((boolean_t) 1)); /* may block */
1692 thread_deallocate(thread); /* may block */
1693
1694 s = splsched();
1695 simple_lock(&reaper_lock);
1696 }
1697
1698 assert_wait((event_t) &reaper_queue, FALSE((boolean_t) 0));
1699 simple_unlock(&reaper_lock);
1700 (void) splx(s);
1701 counter(c_reaper_thread_block++);
1702 thread_block(reaper_thread_continue);
1703 }
1704}
1705
1706void reaper_thread(void)
1707{
1708 reaper_thread_continue();
1709 /*NOTREACHED*/
1710}
1711
1712#if MACH_HOST0
1713/*
1714 * thread_assign:
1715 *
1716 * Change processor set assignment.
1717 * Caller must hold an extra reference to the thread (if this is
1718 * called directly from the ipc interface, this is an operation
1719 * in progress reference). Caller must hold no locks -- this may block.
1720 */
1721
1722kern_return_t
1723thread_assign(
1724 thread_t thread,
1725 processor_set_t new_pset)
1726{
1727 if (thread == THREAD_NULL((thread_t) 0) || new_pset == PROCESSOR_SET_NULL((processor_set_t) 0)) {
1728 return KERN_INVALID_ARGUMENT4;
1729 }
1730
1731 thread_freeze(thread);
1732 thread_doassign(thread, new_pset, TRUE((boolean_t) 1));
1733
1734 return KERN_SUCCESS0;
1735}
1736
1737/*
1738 * thread_freeze:
1739 *
1740 * Freeze thread's assignment. Prelude to assigning thread.
1741 * Only one freeze may be held per thread.
1742 */
1743void
1744thread_freeze(
1745 thread_t thread)
1746{
1747 spl_t s;
1748 /*
1749 * Freeze the assignment, deferring to a prior freeze.
1750 */
1751 s = splsched();
1752 thread_lock(thread);
1753 while (thread->may_assign == FALSE((boolean_t) 0)) {
1754 thread->assign_active = TRUE((boolean_t) 1);
1755 thread_sleep((event_t) &thread->assign_active,
1756 simple_lock_addr(thread->lock)((simple_lock_t)0), FALSE((boolean_t) 0));
1757 thread_lock(thread);
1758 }
1759 thread->may_assign = FALSE((boolean_t) 0);
1760 thread_unlock(thread);
1761 (void) splx(s);
1762
1763}
1764
1765/*
1766 * thread_unfreeze: release freeze on thread's assignment.
1767 */
1768void
1769thread_unfreeze(
1770 thread_t thread)
1771{
1772 spl_t s;
1773
1774 s = splsched();
1775 thread_lock(thread);
1776 thread->may_assign = TRUE((boolean_t) 1);
1777 if (thread->assign_active) {
1778 thread->assign_active = FALSE((boolean_t) 0);
1779 thread_wakeup((event_t)&thread->assign_active);
1780 }
1781 thread_unlock(thread);
1782 splx(s);
1783}
1784
1785/*
1786 * thread_doassign:
1787 *
1788 * Actually do thread assignment. thread_will_assign must have been
1789 * called on the thread. release_freeze argument indicates whether
1790 * to release freeze on thread.
1791 */
1792
1793void
1794thread_doassign(
1795 register thread_t thread,
1796 register processor_set_t new_pset,
1797 boolean_t release_freeze)
1798{
1799 register processor_set_t pset;
1800 register boolean_t old_empty, new_empty;
1801 boolean_t recompute_pri = FALSE((boolean_t) 0);
1802 spl_t s;
1803
1804 /*
1805 * Check for silly no-op.
1806 */
1807 pset = thread->processor_set;
1808 if (pset == new_pset) {
1809 if (release_freeze)
1810 thread_unfreeze(thread);
1811 return;
1812 }
1813 /*
1814 * Suspend the thread and stop it if it's not the current thread.
1815 */
1816 thread_hold(thread);
1817 if (thread != current_thread()(active_threads[(0)]))
1818 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1819
1820 /*
1821 * Lock both psets now, use ordering to avoid deadlocks.
1822 */
1823Restart:
1824 if ((vm_offset_t)pset < (vm_offset_t)new_pset) {
1825 pset_lock(pset);
1826 pset_lock(new_pset);
1827 }
1828 else {
1829 pset_lock(new_pset);
1830 pset_lock(pset);
1831 }
1832
1833 /*
1834 * Check if new_pset is ok to assign to. If not, reassign
1835 * to default_pset.
1836 */
1837 if (!new_pset->active) {
1838 pset_unlock(pset);
1839 pset_unlock(new_pset);
1840 new_pset = &default_pset;
1841 goto Restart;
1842 }
1843
1844 pset_reference(new_pset);
1845
1846 /*
1847 * Grab the thread lock and move the thread.
1848 * Then drop the lock on the old pset and the thread's
1849 * reference to it.
1850 */
1851 s = splsched();
1852 thread_lock(thread);
1853
1854 thread_change_psets(thread, pset, new_pset);
1855
1856 old_empty = pset->empty;
1857 new_empty = new_pset->empty;
1858
1859 pset_unlock(pset);
1860
1861 /*
1862 * Reset policy and priorities if needed.
1863 */
1864#if MACH_FIXPRI1
1865 if (thread->policy & new_pset->policies == 0) {
1866 thread->policy = POLICY_TIMESHARE1;
1867 recompute_pri = TRUE((boolean_t) 1);
1868 }
1869#endif /* MACH_FIXPRI */
1870
1871 if (thread->max_priority < new_pset->max_priority) {
1872 thread->max_priority = new_pset->max_priority;
1873 if (thread->priority < thread->max_priority) {
1874 thread->priority = thread->max_priority;
1875 recompute_pri = TRUE((boolean_t) 1);
1876 }
1877 else {
1878 if ((thread->depress_priority >= 0) &&
1879 (thread->depress_priority < thread->max_priority)) {
1880 thread->depress_priority = thread->max_priority;
1881 }
1882 }
1883 }
1884
1885 pset_unlock(new_pset);
1886
1887 if (recompute_pri)
1888 compute_priority(thread, TRUE((boolean_t) 1));
1889
1890 if (release_freeze) {
1891 thread->may_assign = TRUE((boolean_t) 1);
1892 if (thread->assign_active) {
1893 thread->assign_active = FALSE((boolean_t) 0);
1894 thread_wakeup((event_t)&thread->assign_active);
1895 }
1896 }
1897
1898 thread_unlock(thread);
1899 splx(s);
1900
1901 pset_deallocate(pset);
1902
1903 /*
1904 * Figure out hold status of thread. Threads assigned to empty
1905 * psets must be held. Therefore:
1906 * If old pset was empty release its hold.
1907 * Release our hold from above unless new pset is empty.
1908 */
1909
1910 if (old_empty)
1911 thread_release(thread);
1912 if (!new_empty)
1913 thread_release(thread);
1914
1915 /*
1916 * If current_thread is assigned, context switch to force
1917 * assignment to happen. This also causes hold to take
1918 * effect if the new pset is empty.
1919 */
1920 if (thread == current_thread()(active_threads[(0)])) {
1921 s = splsched();
1922 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
1923 (void) splx(s);
1924 }
1925}
1926#else /* MACH_HOST */
1927kern_return_t
1928thread_assign(
1929 thread_t thread,
1930 processor_set_t new_pset)
1931{
1932 return KERN_FAILURE5;
1933}
1934#endif /* MACH_HOST */
1935
1936/*
1937 * thread_assign_default:
1938 *
1939 * Special version of thread_assign for assigning threads to default
1940 * processor set.
1941 */
1942kern_return_t
1943thread_assign_default(
1944 thread_t thread)
1945{
1946 return thread_assign(thread, &default_pset);
1947}
1948
1949/*
1950 * thread_get_assignment
1951 *
1952 * Return current assignment for this thread.
1953 */
1954kern_return_t thread_get_assignment(
1955 thread_t thread,
1956 processor_set_t *pset)
1957{
1958 *pset = thread->processor_set;
1959 pset_reference(*pset);
1960 return KERN_SUCCESS0;
1961}
1962
1963/*
1964 * thread_priority:
1965 *
1966 * Set priority (and possibly max priority) for thread.
1967 */
1968kern_return_t
1969thread_priority(
1970 thread_t thread,
1971 int priority,
1972 boolean_t set_max)
1973{
1974 spl_t s;
1975 kern_return_t ret = KERN_SUCCESS0;
1976
1977 if ((thread == THREAD_NULL((thread_t) 0)) || invalid_pri(priority)(((priority) < 0) || ((priority) >= 50)))
1978 return KERN_INVALID_ARGUMENT4;
1979
1980 s = splsched();
1981 thread_lock(thread);
1982
1983 /*
1984 * Check for violation of max priority
1985 */
1986 if (priority < thread->max_priority) {
1987 ret = KERN_FAILURE5;
1988 }
1989 else {
1990 /*
1991 * Set priorities. If a depression is in progress,
1992 * change the priority to restore.
1993 */
1994 if (thread->depress_priority >= 0) {
1995 thread->depress_priority = priority;
1996 }
1997 else {
1998 thread->priority = priority;
1999 compute_priority(thread, TRUE((boolean_t) 1));
2000 }
2001
2002 if (set_max)
2003 thread->max_priority = priority;
2004 }
2005 thread_unlock(thread);
2006 (void) splx(s);
2007
2008 return ret;
2009}
2010
2011/*
2012 * thread_set_own_priority:
2013 *
2014 * Internal use only; sets the priority of the calling thread.
2015 * Will adjust max_priority if necessary.
2016 */
2017void
2018thread_set_own_priority(
2019 int priority)
2020{
2021 spl_t s;
2022 thread_t thread = current_thread()(active_threads[(0)]);
2023
2024 s = splsched();
2025 thread_lock(thread);
2026
2027 if (priority < thread->max_priority)
2028 thread->max_priority = priority;
2029 thread->priority = priority;
2030 compute_priority(thread, TRUE((boolean_t) 1));
2031
2032 thread_unlock(thread);
2033 (void) splx(s);
2034}
2035
2036/*
2037 * thread_max_priority:
2038 *
2039 * Reset the max priority for a thread.
2040 */
2041kern_return_t
2042thread_max_priority(
2043 thread_t thread,
2044 processor_set_t pset,
2045 int max_priority)
2046{
2047 spl_t s;
2048 kern_return_t ret = KERN_SUCCESS0;
2049
2050 if ((thread == THREAD_NULL((thread_t) 0)) || (pset == PROCESSOR_SET_NULL((processor_set_t) 0)) ||
2051 invalid_pri(max_priority)(((max_priority) < 0) || ((max_priority) >= 50)))
2052 return KERN_INVALID_ARGUMENT4;
2053
2054 s = splsched();
2055 thread_lock(thread);
2056
2057#if MACH_HOST0
2058 /*
2059 * Check for wrong processor set.
2060 */
2061 if (pset != thread->processor_set) {
2062 ret = KERN_FAILURE5;
2063 }
2064 else {
2065#endif /* MACH_HOST */
2066 thread->max_priority = max_priority;
2067
2068 /*
2069 * Reset priority if it violates new max priority
2070 */
2071 if (max_priority > thread->priority) {
2072 thread->priority = max_priority;
2073
2074 compute_priority(thread, TRUE((boolean_t) 1));
2075 }
2076 else {
2077 if (thread->depress_priority >= 0 &&
2078 max_priority > thread->depress_priority)
2079 thread->depress_priority = max_priority;
2080 }
2081#if MACH_HOST0
2082 }
2083#endif /* MACH_HOST */
2084
2085 thread_unlock(thread);
2086 (void) splx(s);
2087
2088 return ret;
2089}
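/*
 * Worked example, not part of thread.c (numbers are hypothetical): lowering
 * the limit also clamps the thread.  With thread->priority == 10, a call
 * thread_max_priority(thread, pset, 20) sets max_priority to 20 and, since
 * 20 > 10 (the old priority is now more favored than allowed), pulls
 * priority down to 20 and recomputes the scheduled priority; a depressed
 * thread has its depress_priority clamped the same way.
 */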
2090
2091/*
2092 * thread_policy:
2093 *
2094 * Set scheduling policy for thread.
2095 */
2096kern_return_t
2097thread_policy(
2098 thread_t thread,
2099 int policy,
2100 int data)
2101{
2102#if MACH_FIXPRI1
2103 register kern_return_t ret = KERN_SUCCESS0;
2104 register int temp;
2105 spl_t s;
2106#endif /* MACH_FIXPRI */
2107
2108 if ((thread == THREAD_NULL((thread_t) 0)) || invalid_policy(policy)(((policy) <= 0) || ((policy) > 2)))
2109 return KERN_INVALID_ARGUMENT4;
2110
2111#if MACH_FIXPRI1
2112 s = splsched();
2113 thread_lock(thread);
2114
2115 /*
2116 * Check if changing policy.
2117 */
2118 if (policy == thread->policy) {
2119 /*
2120 * Just changing data. This is meaningless for
2121 * timesharing, quantum for fixed priority (but
2122 * has no effect until current quantum runs out).
2123 */
2124 if (policy == POLICY_FIXEDPRI2) {
2125 temp = data * 1000;
2126 if (temp % tick)
2127 temp += tick;
2128 thread->sched_data = temp/tick;
2129 }
2130 }
2131 else {
2132 /*
2133 * Changing policy. Check if new policy is allowed.
2134 */
2135 if ((thread->processor_set->policies & policy) == 0) {
2136 ret = KERN_FAILURE5;
2137 }
2138 else {
2139 /*
2140 * Changing policy. Save data and calculate new
2141 * priority.
2142 */
2143 thread->policy = policy;
2144 if (policy == POLICY_FIXEDPRI2) {
2145 temp = data * 1000;
2146 if (temp % tick)
2147 temp += tick;
2148 thread->sched_data = temp/tick;
2149 }
2150 compute_priority(thread, TRUE((boolean_t) 1));
2151 }
2152 }
2153 thread_unlock(thread);
2154 (void) splx(s);
2155
2156 return ret;
2157#else /* MACH_FIXPRI */
2158 if (policy == POLICY_TIMESHARE1)
2159 return KERN_SUCCESS0;
2160 else
2161 return KERN_FAILURE5;
2162#endif /* MACH_FIXPRI */
2163}
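/*
 * Illustrative note, not part of thread.c: the quantum conversion above is
 * a ceiling division from milliseconds to clock ticks (`tick' holds the
 * microseconds per clock interrupt).  For example, assuming tick == 10000
 * (a 100 Hz clock) and data == 25 ms: temp == 25000, which is not a
 * multiple of tick, so it becomes 35000 and 35000/10000 == 3 ticks, i.e.
 * the quantum is rounded up to 30 ms.  An equivalent one-liner, with a
 * hypothetical macro name:
 */
#define EXAMPLE_MS_TO_TICKS(ms, usec_per_tick) \
	(((ms) * 1000 + (usec_per_tick) - 1) / (usec_per_tick))	/* ceil */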
2164
2165/*
2166 * thread_wire:
2167 *
2168 * Specify that the target thread must always be able
2169 * to run and to allocate memory.
2170 */
2171kern_return_t
2172thread_wire(
2173 host_t host,
2174 thread_t thread,
2175 boolean_t wired)
2176{
2177 spl_t s;
2178
2179 if (host == HOST_NULL((host_t)0))
2180 return KERN_INVALID_ARGUMENT4;
2181
2182 if (thread == THREAD_NULL((thread_t) 0))
2183 return KERN_INVALID_ARGUMENT4;
2184
2185 /*
2186 * This implementation only works for the current thread.
2187 * See stack_privilege.
2188 */
2189 if (thread != current_thread()(active_threads[(0)]))
2190 return KERN_INVALID_ARGUMENT4;
2191
2192 s = splsched();
2193 thread_lock(thread);
2194
2195 if (wired) {
2196 thread->vm_privilege = TRUE((boolean_t) 1);
2197 stack_privilege(thread);
2198 }
2199 else {
2200 thread->vm_privilege = FALSE((boolean_t) 0);
2201/*XXX stack_unprivilege(thread); */
2202 thread->stack_privilege = 0;
2203 }
2204
2205 thread_unlock(thread);
2206 splx(s);
2207
2208 return KERN_SUCCESS0;
2209}
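/*
 * Illustrative sketch, not part of thread.c: since thread_wire only accepts
 * the calling thread, a kernel thread that must stay runnable and able to
 * allocate memory (e.g. a pageout-style daemon) wires itself.  example_host
 * stands for a valid host argument and is hypothetical.
 */
static void example_wire_self(host_t example_host)
{
	(void) thread_wire(example_host, current_thread(), TRUE);
}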
2210
2211/*
2212 * thread_collect_scan:
2213 *
2214 * Attempt to free resources owned by threads.
2215 * pcb_collect doesn't do anything yet.
2216 */
2217
2218void thread_collect_scan(void)
2219{
2220#if 0
2221 register thread_t thread, prev_thread;
2222 processor_set_t pset, prev_pset;
2223
2224 prev_thread = THREAD_NULL((thread_t) 0);
2225 prev_pset = PROCESSOR_SET_NULL((processor_set_t) 0);
2226
2227 simple_lock(&all_psets_lock);
2228 queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
2229 pset_lock(pset);
2230 queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
2231 spl_t s = splsched();
2232 thread_lock(thread);
2233
2234 /*
2235 * Only collect threads which are
2236 * not runnable and are swapped.
2237 */
2238
2239 if ((thread->state & (TH_RUN0x04|TH_SWAPPED0x0100))
2240 == TH_SWAPPED0x0100) {
2241 thread->ref_count++;
2242 thread_unlock(thread);
2243 (void) splx(s);
2244 pset->ref_count++;
2245 pset_unlock(pset);
2246 simple_unlock(&all_psets_lock);
2247
2248 pcb_collect(thread);
2249
2250 if (prev_thread != THREAD_NULL((thread_t) 0))
2251 thread_deallocate(prev_thread);
2252 prev_thread = thread;
2253
2254 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
2255 pset_deallocate(prev_pset);
2256 prev_pset = pset;
2257
2258 simple_lock(&all_psets_lock);
2259 pset_lock(pset);
2260 } else {
2261 thread_unlock(thread);
2262 (void) splx(s);
2263 }
2264 }
2265 pset_unlock(pset);
2266 }
2267 simple_unlock(&all_psets_lock);
2268
2269 if (prev_thread != THREAD_NULL((thread_t) 0))
2270 thread_deallocate(prev_thread);
2271 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
2272 pset_deallocate(prev_pset);
2273#endif /* 0 */
2274}
2275
2276boolean_t thread_collect_allowed = TRUE((boolean_t) 1);
2277unsigned thread_collect_last_tick = 0;
2278unsigned thread_collect_max_rate = 0; /* in ticks */
2279
2280/*
2281 * consider_thread_collect:
2282 *
2283 * Called by the pageout daemon when the system needs more free pages.
2284 */
2285
2286void consider_thread_collect(void)
2287{
2288 /*
2289 * By default, don't attempt thread collection more frequently
2290 * than once a second.
2291 */
2292
2293 if (thread_collect_max_rate == 0)
2294 thread_collect_max_rate = hz;
2295
2296 if (thread_collect_allowed &&
2297 (sched_tick >
2298 (thread_collect_last_tick + thread_collect_max_rate))) {
2299 thread_collect_last_tick = sched_tick;
2300 thread_collect_scan();
2301 }
2302}
2303
2304#if MACH_DEBUG1
2305
2306vm_size_t stack_usage(
2307 register vm_offset_t stack)
2308{
2309 int i;
2310
2311 for (i = 0; i < KERNEL_STACK_SIZE(1*4096)/sizeof(unsigned int); i++)
2312 if (((unsigned int *)stack)[i] != STACK_MARKER0xdeadbeefU)
2313 break;
2314
2315 return KERNEL_STACK_SIZE(1*4096) - i * sizeof(unsigned int);
2316}
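/*
 * Illustrative sketch, not part of thread.c: this is the classic "stack
 * painting" high-water-mark trick.  stack_init (below) fills the whole
 * stack with STACK_MARKER; because kernel stacks grow downward, the lowest
 * words are the last to be touched, so scanning upward from the base for
 * the first overwritten word measures how deep the stack ever got.  A
 * generic version of the same scan, with hypothetical names:
 */
static vm_size_t example_watermark(
	unsigned int	*base,		/* lowest address of the painted area */
	vm_size_t	nwords,		/* size of the area in words */
	unsigned int	marker)
{
	vm_size_t i;

	for (i = 0; i < nwords && base[i] == marker; i++)
		continue;				/* still untouched */
	return (nwords - i) * sizeof(unsigned int);	/* bytes ever used */
}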
2317
2318/*
2319 * Machine-dependent code should call stack_init
2320 * before doing its own initialization of the stack.
2321 */
2322
2323void stack_init(
2324 register vm_offset_t stack)
2325{
2326 if (stack_check_usage) {
2327 int i;
2328
2329 for (i = 0; i < KERNEL_STACK_SIZE(1*4096)/sizeof(unsigned int); i++)
2330 ((unsigned int *)stack)[i] = STACK_MARKER0xdeadbeefU;
2331 }
2332}
2333
2334/*
2335 * Machine-dependent code should call stack_finalize
2336 * before releasing the stack memory.
2337 */
2338
2339void stack_finalize(
2340 register vm_offset_t stack)
2341{
2342 if (stack_check_usage) {
2343 vm_size_t used = stack_usage(stack);
2344
2345 simple_lock(&stack_usage_lock);
2346 if (used > stack_max_usage)
2347 stack_max_usage = used;
2348 simple_unlock(&stack_usage_lock);
2349 }
2350}
2351
2352#ifndef MACHINE_STACK
2353/*
2354 * stack_statistics:
2355 *
2356 * Return statistics on cached kernel stacks.
2357 * *maxusagep must be initialized by the caller.
2358 */
2359
2360void stack_statistics(
2361 natural_t *totalp,
2362 vm_size_t *maxusagep)
2363{
2364 spl_t s;
2365
2366 s = splsched();
2367 stack_lock();
2368 if (stack_check_usage) {
2369 vm_offset_t stack;
2370
2371 /*
2372 * This is pretty expensive to do at splsched,
2373 * but it only happens when someone makes
2374 * a debugging call, so it should be OK.
2375 */
2376
2377 for (stack = stack_free_list; stack != 0;
2378 stack = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1))) {
2379 vm_size_t usage = stack_usage(stack);
2380
2381 if (usage > *maxusagep)
2382 *maxusagep = usage;
2383 }
2384 }
2385
2386 *totalp = stack_free_count;
2387 stack_unlock();
2388 (void) splx(s);
2389}
2390#endif /* MACHINE_STACK */
2391
2392kern_return_t host_stack_usage(
2393 host_t host,
2394 vm_size_t *reservedp,
2395 unsigned int *totalp,
2396 vm_size_t *spacep,
2397 vm_size_t *residentp,
2398 vm_size_t *maxusagep,
2399 vm_offset_t *maxstackp)
2400{
2401 natural_t total;
2402 vm_size_t maxusage;
2403
2404 if (host == HOST_NULL((host_t)0))
2405 return KERN_INVALID_HOST22;
2406
2407 simple_lock(&stack_usage_lock);
2408 maxusage = stack_max_usage;
2409 simple_unlock(&stack_usage_lock);
2410
2411 stack_statistics(&total, &maxusage);
2412
2413 *reservedp = 0;
2414 *totalp = total;
2415 *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
2416 *maxusagep = maxusage;
2417 *maxstackp = 0;
2418 return KERN_SUCCESS0;
2419}
2420
2421kern_return_t processor_set_stack_usage(
2422 processor_set_t pset,
2423 unsigned int *totalp,
2424 vm_size_t *spacep,
2425 vm_size_t *residentp,
2426 vm_size_t *maxusagep,
2427 vm_offset_t *maxstackp)
2428{
2429 unsigned int total;
2430 vm_size_t maxusage;
2431 vm_offset_t maxstack;
2432
2433 register thread_t *threads;
2434 register thread_t tmp_thread;
2435
2436 unsigned int actual; /* this many things */
2437 unsigned int i;
2438
2439 vm_size_t size, size_needed;
2440 vm_offset_t addr;
2441
2442 if (pset == PROCESSOR_SET_NULL((processor_set_t) 0))
2443 return KERN_INVALID_ARGUMENT4;
2444
2445 size = 0; addr = 0;
2446
2447 for (;;) {
2448 pset_lock(pset);
2449 if (!pset->active) {
2450 pset_unlock(pset);
2451 return KERN_INVALID_ARGUMENT4;
2452 }
2453
2454 actual = pset->thread_count;
2455
2456 /* do we have the memory we need? */
2457
2458 size_needed = actual * sizeof(thread_t);
2459 if (size_needed <= size)
2460 break;
2461
2462 /* unlock the pset and allocate more memory */
2463 pset_unlock(pset);
2464
2465 if (size != 0)
2466 kfree(addr, size);
2467
2468 assert(size_needed > 0);
2469 size = size_needed;
2470
2471 addr = kalloc(size);
2472 if (addr == 0)
2473 return KERN_RESOURCE_SHORTAGE6;
2474 }
2475
2476 /* OK, have memory and the processor_set is locked & active */
2477
2478 threads = (thread_t *) addr;
2479 for (i = 0, tmp_thread = (thread_t) queue_first(&pset->threads)((&pset->threads)->next);
2480 i < actual;
2481 i++,
2482 tmp_thread = (thread_t) queue_next(&tmp_thread->pset_threads)((&tmp_thread->pset_threads)->next)) {
2483 thread_reference(tmp_thread);
2484 threads[i] = tmp_thread;
2485 }
2486 assert(queue_end(&pset->threads, (queue_entry_t) tmp_thread));
2487
2488 /* can unlock processor set now that we have the thread refs */
2489 pset_unlock(pset);
2490
2491 /* calculate maxusage and free thread references */
2492
2493 total = 0;
2494 maxusage = 0;
2495 maxstack = 0;
2496 for (i = 0; i < actual; i++) {
2497 thread_t thread = threads[i];
2498 vm_offset_t stack = 0;
2499
2500 /*
2501 * thread->kernel_stack is only accurate if the
2502 * thread isn't swapped and is not executing.
2503 *
2504 * Of course, we don't have the appropriate locks
2505 * for these shenanigans.
2506 */
2507
2508 if ((thread->state & TH_SWAPPED0x0100) == 0) {
2509 int cpu;
2510
2511 stack = thread->kernel_stack;
2512
2513 for (cpu = 0; cpu < NCPUS1; cpu++)
2514 if (active_threads[cpu] == thread) {
2515 stack = active_stacks[cpu];
2516 break;
2517 }
2518 }
2519
2520 if (stack != 0) {
2521 total++;
2522
2523 if (stack_check_usage) {
2524 vm_size_t usage = stack_usage(stack);
2525
2526 if (usage > maxusage) {
2527 maxusage = usage;
2528 maxstack = (vm_offset_t) thread;
2529 }
2530 }
2531 }
2532
2533 thread_deallocate(thread);
2534 }
2535
2536 if (size != 0)
2537 kfree(addr, size);
2538
2539 *totalp = total;
2540 *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
2541 *maxusagep = maxusage;
2542 *maxstackp = maxstack;
2543 return KERN_SUCCESS0;
2544}
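/*
 * Illustrative sketch, not part of thread.c: the for (;;) loop above is the
 * usual "size it, unlock, allocate, relock, re-check" pattern for taking a
 * snapshot of a list whose length can change while the lock is dropped
 * (kalloc may block).  The same skeleton, with hypothetical example_ types
 * and locking primitives standing in for the pset ones:
 */
static vm_offset_t example_snapshot_buffer(
	struct example_list	*list,		/* hypothetical locked list */
	vm_size_t		*sizep)		/* returns the buffer size */
{
	vm_offset_t addr = 0;
	vm_size_t size = 0, size_needed;

	for (;;) {
		example_lock(list);
		size_needed = list->count * sizeof(struct example_elt *);
		if (size_needed <= size)
			break;		/* big enough: return still holding the lock */
		example_unlock(list);	/* too small: grow the buffer and retry */
		if (size != 0)
			kfree(addr, size);
		size = size_needed;
		addr = kalloc(size);
		if (addr == 0)
			return 0;	/* KERN_RESOURCE_SHORTAGE in the real code */
	}
	*sizep = size;
	return addr;	/* caller copies the elements, unlocks, and kfrees */
}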
2545
2546/*
2547 * Useful in the debugger:
2548 */
2549void
2550thread_stats(void)
2551{
2552 register thread_t thread;
2553 int total = 0, rpcreply = 0;
2554
2555 queue_iterate(&default_pset.threads, thread, thread_t, pset_threads) {
2556 total++;
2557 if (thread->ith_rpc_reply != IP_NULL((ipc_port_t) ((ipc_object_t) 0)))
2558 rpcreply++;
2559 }
2560
2561 printf("%d total threads.\n", total);
2562 printf("%d using rpc_reply.\n", rpcreply);
2563}
2564#endif /* MACH_DEBUG */