Bug Summary

File: obj-scan-build/../kern/thread.c
Location: line 1676, column 2
Description: Function call argument is an uninitialized value
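
The flagged path starts in kernel_thread() (line 1672): thread_create() can return on its failure path (when the parent task is already inactive it terminates the half-built thread and returns KERN_FAILURE at line 563) without ever writing through its child_thread out-parameter, kernel_thread() ignores the returned status, and its local variable thread is therefore still uninitialized when it is passed to thread_deallocate() at line 1676. The fragment below is a minimal user-space sketch of the same pattern, using placeholder names rather than the Mach ones, to show what the checker objects to.

	#include <stdio.h>

	typedef int kern_return_t;
	#define KERN_SUCCESS 0
	#define KERN_FAILURE 5

	/* Stand-in for thread_create(): on failure the out-parameter is never written. */
	static kern_return_t create(int *out, int fail)
	{
		if (fail)
			return KERN_FAILURE;	/* *out left untouched */
		*out = 42;
		return KERN_SUCCESS;
	}

	int main(void)
	{
		int handle;			/* declared without an initial value */
		(void) create(&handle, 1);	/* status ignored, as in kernel_thread() */
		printf("%d\n", handle);		/* reads an uninitialized value here */
		return 0;
	}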

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1994-1987 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: kern/thread.c
28 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
29 * Date: 1986
30 *
31 * Thread management primitives implementation.
32 */
33
34#include <kern/printf.h>
35#include <mach/std_types.h>
36#include <mach/policy.h>
37#include <mach/thread_info.h>
38#include <mach/thread_special_ports.h>
39#include <mach/thread_status.h>
40#include <mach/time_value.h>
41#include <machine/vm_param.h>
42#include <kern/ast.h>
43#include <kern/counters.h>
44#include <kern/debug.h>
45#include <kern/eventcount.h>
46#include <kern/ipc_mig.h>
47#include <kern/ipc_tt.h>
48#include <kern/processor.h>
49#include <kern/queue.h>
50#include <kern/sched.h>
51#include <kern/sched_prim.h>
52#include <kern/syscall_subr.h>
53#include <kern/thread.h>
54#include <kern/thread_swap.h>
55#include <kern/host.h>
56#include <kern/kalloc.h>
57#include <kern/slab.h>
58#include <kern/mach_clock.h>
59#include <vm/vm_kern.h>
60#include <vm/vm_user.h>
61#include <ipc/ipc_kmsg.h>
62#include <ipc/ipc_port.h>
63#include <ipc/mach_msg.h>
64#include <ipc/mach_port.h>
65#include <machine/machspl.h> /* for splsched */
66#include <machine/pcb.h>
67#include <machine/thread.h> /* for MACHINE_STACK */
68
69thread_t active_threads[NCPUS1];
70vm_offset_t active_stacks[NCPUS1];
71
72struct kmem_cache thread_cache;
73
74queue_head_t reaper_queue;
75decl_simple_lock_data(, reaper_lock)
76
77extern void pcb_module_init(void);
78
79/* private */
80struct thread thread_template;
81
82#if MACH_DEBUG1
83#define STACK_MARKER 0xdeadbeefU
84boolean_t stack_check_usage = FALSE((boolean_t) 0);
85decl_simple_lock_data(, stack_usage_lock)
86vm_size_t stack_max_usage = 0;
87#endif /* MACH_DEBUG */
88
89/*
90 * Machine-dependent code must define:
91 * pcb_init
92 * pcb_terminate
93 * pcb_collect
94 *
95 * The thread->pcb field is reserved for machine-dependent code.
96 */
97
98#ifdef MACHINE_STACK
99/*
100 * Machine-dependent code must define:
101 * stack_alloc_try
102 * stack_alloc
103 * stack_free
104 * stack_handoff
105 * stack_collect
106 * and if MACH_DEBUG:
107 * stack_statistics
108 */
109#else /* MACHINE_STACK */
110/*
111 * We allocate stacks from generic kernel VM.
112 * Machine-dependent code must define:
113 * stack_attach
114 * stack_detach
115 * stack_handoff
116 *
117 * The stack_free_list can only be accessed at splsched,
118 * because stack_alloc_try/thread_invoke operate at splsched.
119 */
120
121decl_simple_lock_data(, stack_lock_data)/* splsched only */
122#define stack_lock() simple_lock(&stack_lock_data)
123#define stack_unlock() simple_unlock(&stack_lock_data)
124
125vm_offset_t stack_free_list; /* splsched only */
126unsigned int stack_free_count = 0; /* splsched only */
127unsigned int stack_free_limit = 1; /* patchable */
128
129unsigned int stack_alloc_hits = 0; /* debugging */
130unsigned int stack_alloc_misses = 0; /* debugging */
131unsigned int stack_alloc_max = 0; /* debugging */
132
133/*
134 * The next field is at the base of the stack,
135 * so the low end is left unsullied.
136 */
137
138#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE(1*4096)) - 1))
139
140/*
141 * stack_alloc_try:
142 *
143 * Non-blocking attempt to allocate a kernel stack.
144 * Called at splsched with the thread locked.
145 */
146
147boolean_t stack_alloc_try(
148 thread_t thread,
149 void (*resume)(thread_t))
150{
151 vm_offset_t stack;
152
153 stack_lock();
154 stack = stack_free_list;
155 if (stack != 0) {
156 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
157 stack_free_count--;
158 } else {
159 stack = thread->stack_privilege;
160 }
161 stack_unlock();
162
163 if (stack != 0) {
164 stack_attach(thread, stack, resume);
165 stack_alloc_hits++;
166 return TRUE((boolean_t) 1);
167 } else {
168 stack_alloc_misses++;
169 return FALSE((boolean_t) 0);
170 }
171}
172
173/*
174 * stack_alloc:
175 *
176 * Allocate a kernel stack for a thread.
177 * May block.
178 */
179
180void stack_alloc(
181 thread_t thread,
182 void (*resume)(thread_t))
183{
184 vm_offset_t stack;
185 spl_t s;
186
187 /*
188 * We first try the free list. It is probably empty,
189 * or stack_alloc_try would have succeeded, but possibly
190 * a stack was freed before the swapin thread got to us.
191 */
192
193 s = splsched();
194 stack_lock();
195 stack = stack_free_list;
196 if (stack != 0) {
197 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
198 stack_free_count--;
199 }
200 stack_unlock();
201 (void) splx(s);
202
203 if (stack == 0) {
204 /*
205 * Kernel stacks should be naturally aligned,
206 * so that it is easy to find the starting/ending
207 * addresses of a stack given an address in the middle.
208 */
209
210 if (kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE(1*4096))
211 != KERN_SUCCESS0)
212 panic("stack_alloc");
213
214#if MACH_DEBUG1
215 stack_init(stack);
216#endif /* MACH_DEBUG */
217 }
218
219 stack_attach(thread, stack, resume);
220}
221
222/*
223 * stack_free:
224 *
225 * Free a thread's kernel stack.
226 * Called at splsched with the thread locked.
227 */
228
229void stack_free(
230 thread_t thread)
231{
232 vm_offset_t stack;
233
234 stack = stack_detach(thread);
235
236 if (stack != thread->stack_privilege) {
237 stack_lock();
238 stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1)) = stack_free_list;
239 stack_free_list = stack;
240 if (++stack_free_count > stack_alloc_max)
241 stack_alloc_max = stack_free_count;
242 stack_unlock();
243 }
244}
245
246/*
247 * stack_collect:
248 *
249 * Free excess kernel stacks.
250 * May block.
251 */
252
253void stack_collect(void)
254{
255 vm_offset_t stack;
256 spl_t s;
257
258 s = splsched();
259 stack_lock();
260 while (stack_free_count > stack_free_limit) {
261 stack = stack_free_list;
262 stack_free_list = stack_next(stack)(*((vm_offset_t *)((stack) + (1*4096)) - 1));
263 stack_free_count--;
264 stack_unlock();
265 (void) splx(s);
266
267#if MACH_DEBUG1
268 stack_finalize(stack);
269#endif /* MACH_DEBUG */
270 kmem_free(kmem_map, stack, KERNEL_STACK_SIZE(1*4096));
271
272 s = splsched();
273 stack_lock();
274 }
275 stack_unlock();
276 (void) splx(s);
277}
278#endif /* MACHINE_STACK */
279
280/*
281 * stack_privilege:
282 *
283 * stack_alloc_try on this thread must always succeed.
284 */
285
286void stack_privilege(
287 thread_t thread)
288{
289 /*
290 * This implementation only works for the current thread.
291 */
292
293 if (thread != current_thread()(active_threads[(0)]))
294 panic("stack_privilege");
295
296 if (thread->stack_privilege == 0)
297 thread->stack_privilege = current_stack()(active_stacks[(0)]);
298}
299
300void thread_init(void)
301{
302 kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
303 NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
304
305 /*
306 * Fill in a template thread for fast initialization.
307 * [Fields that must be (or are typically) reset at
308 * time of creation are so noted.]
309 */
310
311 /* thread_template.links (none) */
312 thread_template.runq = RUN_QUEUE_NULL((run_queue_t) 0);
313
314 /* thread_template.task (later) */
315 /* thread_template.thread_list (later) */
316 /* thread_template.pset_threads (later) */
317
318 /* thread_template.lock (later) */
319 /* one ref for being alive; one for the guy who creates the thread */
320 thread_template.ref_count = 2;
321
322 thread_template.pcb = (pcb_t) 0; /* (reset) */
323 thread_template.kernel_stack = (vm_offset_t) 0;
324 thread_template.stack_privilege = (vm_offset_t) 0;
325
326 thread_template.wait_event = 0;
327 /* thread_template.suspend_count (later) */
328 thread_template.wait_result = KERN_SUCCESS0;
329 thread_template.wake_active = FALSE((boolean_t) 0);
330 thread_template.state = TH_SUSP0x02 | TH_SWAPPED0x0100;
331 thread_template.swap_func = thread_bootstrap_return;
332
333/* thread_template.priority (later) */
334 thread_template.max_priority = BASEPRI_USER25;
335/* thread_template.sched_pri (later - compute_priority) */
336#if MACH_FIXPRI1
337 thread_template.sched_data = 0;
338 thread_template.policy = POLICY_TIMESHARE1;
339#endif /* MACH_FIXPRI */
340 thread_template.depress_priority = -1;
341 thread_template.cpu_usage = 0;
342 thread_template.sched_usage = 0;
343 /* thread_template.sched_stamp (later) */
344
345 thread_template.recover = (vm_offset_t) 0;
346 thread_template.vm_privilege = FALSE((boolean_t) 0);
347
348 thread_template.user_stop_count = 1;
349
350 /* thread_template.<IPC structures> (later) */
351
352 timer_init(&(thread_template.user_timer));
353 timer_init(&(thread_template.system_timer));
354 thread_template.user_timer_save.low = 0;
355 thread_template.user_timer_save.high = 0;
356 thread_template.system_timer_save.low = 0;
357 thread_template.system_timer_save.high = 0;
358 thread_template.cpu_delta = 0;
359 thread_template.sched_delta = 0;
360
361 thread_template.active = FALSE((boolean_t) 0); /* reset */
362 thread_template.ast = AST_ZILCH0x0;
363
364 /* thread_template.processor_set (later) */
365 thread_template.bound_processor = PROCESSOR_NULL((processor_t) 0);
366#if MACH_HOST0
367 thread_template.may_assign = TRUE((boolean_t) 1);
368 thread_template.assign_active = FALSE((boolean_t) 0);
369#endif /* MACH_HOST */
370
371#if NCPUS1 > 1
372 /* thread_template.last_processor (later) */
373#endif /* NCPUS > 1 */
374
375 /*
376 * Initialize other data structures used in
377 * this module.
378 */
379
380 queue_init(&reaper_queue);
381 simple_lock_init(&reaper_lock);
382
383#ifndef MACHINE_STACK
384 simple_lock_init(&stack_lock_data);
385#endif /* MACHINE_STACK */
386
387#if MACH_DEBUG1
388 simple_lock_init(&stack_usage_lock);
389#endif /* MACH_DEBUG */
390
391 /*
392 * Initialize any machine-dependent
393 * per-thread structures necessary.
394 */
395
396 pcb_module_init();
397}
398
399kern_return_t thread_create(
400 task_t parent_task,
401 thread_t *child_thread) /* OUT */
402{
403 thread_t new_thread;
404 processor_set_t pset;
405
406 if (parent_task == TASK_NULL((task_t) 0))
3
Assuming 'parent_task' is not equal to null
4
Taking false branch
407 return KERN_INVALID_ARGUMENT4;
408
409 /*
410 * Allocate a thread and initialize static fields
411 */
412
413 new_thread = (thread_t) kmem_cache_alloc(&thread_cache);
414
415 if (new_thread == THREAD_NULL((thread_t) 0))
5
Assuming 'new_thread' is not equal to null
6
Taking false branch
416 return KERN_RESOURCE_SHORTAGE6;
417
418 *new_thread = thread_template;
419
420 record_time_stamp (&new_thread->creation_time);
421
422 /*
423 * Initialize runtime-dependent fields
424 */
425
426 new_thread->task = parent_task;
427 simple_lock_init(&new_thread->lock);
428 new_thread->sched_stamp = sched_tick;
429 thread_timeout_setup(new_thread);
430
431 /*
432 * Create a pcb. The kernel stack is created later,
433 * when the thread is swapped-in.
434 */
435 pcb_init(new_thread);
436
437 ipc_thread_init(new_thread);
438
439 /*
440 * Find the processor set for the parent task.
441 */
442 task_lock(parent_task);
443 pset = parent_task->processor_set;
444 pset_reference(pset);
445 task_unlock(parent_task);
446
447 /*
448 * Lock both the processor set and the task,
449 * so that the thread can be added to both
450 * simultaneously. Processor set must be
451 * locked first.
452 */
453
454 Restart:
455 pset_lock(pset);
456 task_lock(parent_task);
457
458 /*
459 * If the task has changed processor sets,
460 * catch up (involves lots of lock juggling).
461 */
462 {
463 processor_set_t cur_pset;
464
465 cur_pset = parent_task->processor_set;
466 if (!cur_pset->active)
7
Taking false branch
467 cur_pset = &default_pset;
468
469 if (cur_pset != pset) {
8
Taking false branch
470 pset_reference(cur_pset);
471 task_unlock(parent_task);
472 pset_unlock(pset);
473 pset_deallocate(pset);
474 pset = cur_pset;
475 goto Restart;
476 }
477 }
478
479 /*
480 * Set the thread`s priority from the pset and task.
481 */
482
483 new_thread->priority = parent_task->priority;
484 if (pset->max_priority > new_thread->max_priority)
9
Taking false branch
485 new_thread->max_priority = pset->max_priority;
486 if (new_thread->max_priority > new_thread->priority)
10
Taking false branch
487 new_thread->priority = new_thread->max_priority;
488 /*
489 * Don't need to lock thread here because it can't
490 * possibly execute and no one else knows about it.
491 */
492 compute_priority(new_thread, TRUE((boolean_t) 1));
493
494 /*
495 * Thread is suspended if the task is. Add 1 to
496 * suspend count since thread is created in suspended
497 * state.
498 */
499 new_thread->suspend_count = parent_task->suspend_count + 1;
500
501 /*
502 * Add the thread to the processor set.
503 * If the pset is empty, suspend the thread again.
504 */
505
506 pset_add_thread(pset, new_thread);
507 if (pset->empty)
11
Taking false branch
508 new_thread->suspend_count++;
509
510#if HW_FOOTPRINT0
511 /*
512 * Need to set last_processor, idle processor would be best, but
513 * that requires extra locking nonsense. Go for tail of
514 * processors queue to avoid master.
515 */
516 if (!pset->empty) {
517 new_thread->last_processor =
518 (processor_t)queue_first(&pset->processors)((&pset->processors)->next);
519 }
520 else {
521 /*
522 * Thread created in empty processor set. Pick
523 * master processor as an acceptable legal value.
524 */
525 new_thread->last_processor = master_processor;
526 }
527#else /* HW_FOOTPRINT */
528 /*
529 * Don't need to initialize because the context switch
530 * code will set it before it can be used.
531 */
532#endif /* HW_FOOTPRINT */
533
534#if MACH_PCSAMPLE1
535 new_thread->pc_sample.seqno = 0;
536 new_thread->pc_sample.sampletypes = 0;
537#endif /* MACH_PCSAMPLE */
538
539 new_thread->pc_sample.buffer = 0;
540 /*
541 * Add the thread to the task`s list of threads.
542 * The new thread holds another reference to the task.
543 */
544
545 parent_task->ref_count++;
546
547 parent_task->thread_count++;
548 queue_enter(&parent_task->thread_list, new_thread, thread_t,
12
Within the expansion of the macro 'queue_enter':
549 thread_list);
550
551 /*
552 * Finally, mark the thread active.
553 */
554
555 new_thread->active = TRUE((boolean_t) 1);
556
557 if (!parent_task->active) {
13
Taking true branch
558 task_unlock(parent_task);
559 pset_unlock(pset);
560 (void) thread_terminate(new_thread);
14
Calling 'thread_terminate'
20
Returning from 'thread_terminate'
561 /* release ref we would have given our caller */
562 thread_deallocate(new_thread);
21
Calling 'thread_deallocate'
24
Returning from 'thread_deallocate'
563 return KERN_FAILURE5;
564 }
565 task_unlock(parent_task);
566 pset_unlock(pset);
567
568 ipc_thread_enable(new_thread);
569
570 *child_thread = new_thread;
571 return KERN_SUCCESS0;
572}
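
On the path shown above (steps 13 through 24), thread_create() takes the !parent_task->active branch, terminates and deallocates the half-built thread, and returns KERN_FAILURE at line 563 without ever storing through child_thread, so the out-parameter is defined only when the call returns KERN_SUCCESS. A sketch of the calling convention this implies, written against the declarations in this file (the wrapper function itself is illustrative, not code from the report):

	/* Illustrative only: use thread_create()'s out-parameter only on success. */
	static kern_return_t example_spawn(task_t parent_task, thread_t *out)
	{
		thread_t	new_thread;
		kern_return_t	kr;

		kr = thread_create(parent_task, &new_thread);
		if (kr != KERN_SUCCESS)
			return kr;	/* new_thread was never written; do not use or deallocate it */

		*out = new_thread;	/* valid, referenced thread on the success path only */
		return KERN_SUCCESS;
	}
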
573
574unsigned int thread_deallocate_stack = 0;
575
576void thread_deallocate(
577 thread_t thread)
578{
579 spl_t s;
580 task_t task;
581 processor_set_t pset;
582
583 time_value_t user_time, system_time;
584
585 if (thread == THREAD_NULL((thread_t) 0))
22
Taking false branch
586 return;
587
588 /*
589 * First, check for new count > 0 (the common case).
590 * Only the thread needs to be locked.
591 */
592 s = splsched();
593 thread_lock(thread);
594 if (--thread->ref_count > 0) {
23
Taking true branch
595 thread_unlock(thread);
596 (void) splx(s);
597 return;
598 }
599
600 /*
601 * Count is zero. However, the task's and processor set's
602 * thread lists have implicit references to
603 * the thread, and may make new ones. Their locks also
604 * dominate the thread lock. To check for this, we
605 * temporarily restore the one thread reference, unlock
606 * the thread, and then lock the other structures in
607 * the proper order.
608 */
609 thread->ref_count = 1;
610 thread_unlock(thread);
611 (void) splx(s);
612
613 pset = thread->processor_set;
614 pset_lock(pset);
615
616#if MACH_HOST0
617 /*
618 * The thread might have moved.
619 */
620 while (pset != thread->processor_set) {
621 pset_unlock(pset);
622 pset = thread->processor_set;
623 pset_lock(pset);
624 }
625#endif /* MACH_HOST */
626
627 task = thread->task;
628 task_lock(task);
629
630 s = splsched();
631 thread_lock(thread);
632
633 if (--thread->ref_count > 0) {
634 /*
635 * Task or processor_set made extra reference.
636 */
637 thread_unlock(thread);
638 (void) splx(s);
639 task_unlock(task);
640 pset_unlock(pset);
641 return;
642 }
643
644 /*
645 * Thread has no references - we can remove it.
646 */
647
648 /*
649 * Remove pending timeouts.
650 */
651 reset_timeout_check(&thread->timer);
652
653 reset_timeout_check(&thread->depress_timer);
654 thread->depress_priority = -1;
655
656 /*
657 * Accumulate times for dead threads in task.
658 */
659 thread_read_times(thread, &user_time, &system_time);
660 time_value_add(&task->total_user_time, &user_time);
661 time_value_add(&task->total_system_time, &system_time);
662
663 /*
664 * Remove thread from task list and processor_set threads list.
665 */
666 task->thread_count--;
667 queue_remove(&task->thread_list, thread, thread_t, thread_list);
668
669 pset_remove_thread(pset, thread);
670
671 thread_unlock(thread); /* no more references - safe */
672 (void) splx(s);
673 task_unlock(task);
674 pset_unlock(pset);
675 pset_deallocate(pset);
676
677 /*
678 * A couple of quick sanity checks
679 */
680
681 if (thread == current_thread()(active_threads[(0)])) {
682 panic("thread deallocating itself");
683 }
684 if ((thread->state & ~(TH_RUN0x04 | TH_HALTED0x10 | TH_SWAPPED0x0100)) != TH_SUSP0x02)
685 panic("unstopped thread destroyed!");
686
687 /*
688 * Deallocate the task reference, since we know the thread
689 * is not running.
690 */
691 task_deallocate(thread->task); /* may block */
692
693 /*
694 * Clean up any machine-dependent resources.
695 */
696 if ((thread->state & TH_SWAPPED0x0100) == 0) {
697 splsched();
698 stack_free(thread);
699 (void) splx(s);
700 thread_deallocate_stack++;
701 }
702 /*
703 * Rattle the event count machinery (gag)
704 */
705 evc_notify_abort(thread);
706
707 pcb_terminate(thread);
708 kmem_cache_free(&thread_cache, (vm_offset_t) thread);
709}
710
711void thread_reference(
712 thread_t thread)
713{
714 spl_t s;
715
716 if (thread == THREAD_NULL((thread_t) 0))
717 return;
718
719 s = splsched();
720 thread_lock(thread);
721 thread->ref_count++;
722 thread_unlock(thread);
723 (void) splx(s);
724}
725
726/*
727 * thread_terminate:
728 *
729 * Permanently stop execution of the specified thread.
730 *
731 * A thread to be terminated must be allowed to clean up any state
732 * that it has before it exits. The thread is broken out of any
733 * wait condition that it is in, and signalled to exit. It then
734 * cleans up its state and calls thread_halt_self on its way out of
735 * the kernel. The caller waits for the thread to halt, terminates
736 * its IPC state, and then deallocates it.
737 *
738 * If the caller is the current thread, it must still exit the kernel
739 * to clean up any state (thread and port references, messages, etc).
740 * When it exits the kernel, it then terminates its IPC state and
741 * queues itself for the reaper thread, which will wait for the thread
742 * to stop and then deallocate it. (A thread cannot deallocate itself,
743 * since it needs a kernel stack to execute.)
744 */
745kern_return_t thread_terminate(
746 thread_t thread)
747{
748 thread_t cur_thread = current_thread()(active_threads[(0)]);
749 task_t cur_task;
750 spl_t s;
751
752 if (thread == THREAD_NULL((thread_t) 0))
15
Taking false branch
753 return KERN_INVALID_ARGUMENT4;
754
755 /*
756 * Break IPC control over the thread.
757 */
758 ipc_thread_disable(thread);
759
760 if (thread == cur_thread) {
16
Taking false branch
761
762 /*
763 * Current thread will queue itself for reaper when
764 * exiting kernel.
765 */
766 s = splsched();
767 thread_lock(thread);
768 if (thread->active) {
769 thread->active = FALSE((boolean_t) 0);
770 thread_ast_set(thread, AST_TERMINATE)(thread)->ast |= (0x2);
771 }
772 thread_unlock(thread);
773 ast_on(cpu_number(), AST_TERMINATE)({ if ((need_ast[(0)] |= (0x2)) != 0x0) { ; } });
774 splx(s);
775 return KERN_SUCCESS0;
776 }
777
778 /*
779 * Lock both threads and the current task
780 * to check termination races and prevent deadlocks.
781 */
782 cur_task = current_task()((active_threads[(0)])->task);
783 task_lock(cur_task);
784 s = splsched();
785 if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
17
Taking false branch
786 thread_lock(thread);
787 thread_lock(cur_thread);
788 }
789 else {
790 thread_lock(cur_thread);
791 thread_lock(thread);
792 }
793
794 /*
795 * If the current thread is being terminated, help out.
796 */
797 if ((!cur_task->active) || (!cur_thread->active)) {
18
Taking false branch
798 thread_unlock(cur_thread);
799 thread_unlock(thread);
800 (void) splx(s);
801 task_unlock(cur_task);
802 thread_terminate(cur_thread);
803 return KERN_FAILURE5;
804 }
805
806 thread_unlock(cur_thread);
807 task_unlock(cur_task);
808
809 /*
810 * Terminate victim thread.
811 */
812 if (!thread->active) {
19
Taking true branch
813 /*
814 * Someone else got there first.
815 */
816 thread_unlock(thread);
817 (void) splx(s);
818 return KERN_FAILURE5;
819 }
820
821 thread->active = FALSE((boolean_t) 0);
822
823 thread_unlock(thread);
824 (void) splx(s);
825
826#if MACH_HOST0
827 /*
828 * Reassign thread to default pset if needed.
829 */
830 thread_freeze(thread);
831 if (thread->processor_set != &default_pset) {
832 thread_doassign(thread, &default_pset, FALSE((boolean_t) 0));
833 }
834#endif /* MACH_HOST */
835
836 /*
837 * Halt the victim at the clean point.
838 */
839 (void) thread_halt(thread, TRUE((boolean_t) 1));
840#if MACH_HOST0
841 thread_unfreeze(thread);
842#endif /* MACH_HOST */
843 /*
844 * Shut down the victims IPC and deallocate its
845 * reference to itself.
846 */
847 ipc_thread_terminate(thread);
848 thread_deallocate(thread);
849 return KERN_SUCCESS0;
850}
851
852kern_return_t thread_terminate_release(
853 thread_t thread,
854 task_t task,
855 mach_port_t thread_name,
856 mach_port_t reply_port,
857 vm_offset_t address,
858 vm_size_t size)
859{
860 if (task == NULL((void *) 0))
861 return KERN_INVALID_ARGUMENT4;
862
863 mach_port_deallocate(task->itk_space, thread_name);
864
865 if (reply_port != MACH_PORT_NULL((mach_port_t) 0))
866 mach_port_destroy(task->itk_space, reply_port);
867
868 if ((address != 0) || (size != 0))
869 vm_deallocate(task->map, address, size);
870
871 return thread_terminate(thread);
872}
873
874/*
875 * thread_force_terminate:
876 *
877 * Version of thread_terminate called by task_terminate. thread is
878 * not the current thread. task_terminate is the dominant operation,
879 * so we can force this thread to stop.
880 */
881void
882thread_force_terminate(
883 thread_t thread)
884{
885 boolean_t deallocate_here;
886 spl_t s;
887
888 ipc_thread_disable(thread);
889
890#if MACH_HOST0
891 /*
892 * Reassign thread to default pset if needed.
893 */
894 thread_freeze(thread);
895 if (thread->processor_set != &default_pset)
896 thread_doassign(thread, &default_pset, FALSE((boolean_t) 0));
897#endif /* MACH_HOST */
898
899 s = splsched();
900 thread_lock(thread);
901 deallocate_here = thread->active;
902 thread->active = FALSE((boolean_t) 0);
903 thread_unlock(thread);
904 (void) splx(s);
905
906 (void) thread_halt(thread, TRUE((boolean_t) 1));
907 ipc_thread_terminate(thread);
908
909#if MACH_HOST0
910 thread_unfreeze(thread);
911#endif /* MACH_HOST */
912
913 if (deallocate_here)
914 thread_deallocate(thread);
915}
916
917
918/*
919 * Halt a thread at a clean point, leaving it suspended.
920 *
921 * must_halt indicates whether thread must halt.
922 *
923 */
924kern_return_t thread_halt(
925 thread_t thread,
926 boolean_t must_halt)
927{
928 thread_t cur_thread = current_thread()(active_threads[(0)]);
929 kern_return_t ret;
930 spl_t s;
931
932 if (thread == cur_thread)
933 panic("thread_halt: trying to halt current thread.");
934 /*
935 * If must_halt is FALSE, then a check must be made for
936 * a cycle of halt operations.
937 */
938 if (!must_halt) {
939 /*
940 * Grab both thread locks.
941 */
942 s = splsched();
943 if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
944 thread_lock(thread);
945 thread_lock(cur_thread);
946 }
947 else {
948 thread_lock(cur_thread);
949 thread_lock(thread);
950 }
951
952 /*
953 * If target thread is already halted, grab a hold
954 * on it and return.
955 */
956 if (thread->state & TH_HALTED0x10) {
957 thread->suspend_count++;
958 thread_unlock(cur_thread);
959 thread_unlock(thread);
960 (void) splx(s);
961 return KERN_SUCCESS0;
962 }
963
964 /*
965 * If someone is trying to halt us, we have a potential
966 * halt cycle. Break the cycle by interrupting anyone
967 * who is trying to halt us, and causing this operation
968 * to fail; retry logic will only retry operations
969 * that cannot deadlock. (If must_halt is TRUE, this
970 * operation can never cause a deadlock.)
971 */
972 if (cur_thread->ast & AST_HALT0x1) {
973 thread_wakeup_with_result((event_t)&cur_thread->wake_active,
974 THREAD_INTERRUPTED);
975 thread_unlock(thread);
976 thread_unlock(cur_thread);
977 (void) splx(s);
978 return KERN_FAILURE5;
979 }
980
981 thread_unlock(cur_thread);
982
983 }
984 else {
985 /*
986 * Lock thread and check whether it is already halted.
987 */
988 s = splsched();
989 thread_lock(thread);
990 if (thread->state & TH_HALTED0x10) {
991 thread->suspend_count++;
992 thread_unlock(thread);
993 (void) splx(s);
994 return KERN_SUCCESS0;
995 }
996 }
997
998 /*
999 * Suspend thread - inline version of thread_hold() because
1000 * thread is already locked.
1001 */
1002 thread->suspend_count++;
1003 thread->state |= TH_SUSP0x02;
1004
1005 /*
1006 * If someone else is halting it, wait for that to complete.
1007 * Fail if wait interrupted and must_halt is false.
1008 */
1009 while ((thread->ast & AST_HALT0x1) && (!(thread->state & TH_HALTED0x10))) {
1010 thread->wake_active = TRUE((boolean_t) 1);
1011 thread_sleep((event_t) &thread->wake_active,
1012 simple_lock_addr(thread->lock)((simple_lock_t)0), TRUE((boolean_t) 1));
1013
1014 if (thread->state & TH_HALTED0x10) {
1015 (void) splx(s);
1016 return KERN_SUCCESS0;
1017 }
1018 if ((current_thread()(active_threads[(0)])->wait_result != THREAD_AWAKENED0)
1019 && !(must_halt)) {
1020 (void) splx(s);
1021 thread_release(thread);
1022 return KERN_FAILURE5;
1023 }
1024 thread_lock(thread);
1025 }
1026
1027 /*
1028 * Otherwise, have to do it ourselves.
1029 */
1030
1031 thread_ast_set(thread, AST_HALT)(thread)->ast |= (0x1);
1032
1033 while (TRUE((boolean_t) 1)) {
1034 /*
1035 * Wait for thread to stop.
1036 */
1037 thread_unlock(thread);
1038 (void) splx(s);
1039
1040 ret = thread_dowait(thread, must_halt);
1041
1042 /*
1043 * If the dowait failed, so do we. Drop AST_HALT, and
1044 * wake up anyone else who might be waiting for it.
1045 */
1046 if (ret != KERN_SUCCESS0) {
1047 s = splsched();
1048 thread_lock(thread);
1049 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1050 thread_wakeup_with_result((event_t)&thread->wake_active,
1051 THREAD_INTERRUPTED);
1052 thread_unlock(thread);
1053 (void) splx(s);
1054
1055 thread_release(thread);
1056 return ret;
1057 }
1058
1059 /*
1060 * Clear any interruptible wait.
1061 */
1062 clear_wait(thread, THREAD_INTERRUPTED2, TRUE((boolean_t) 1));
1063
1064 /*
1065 * If the thread's at a clean point, we're done.
1066 * Don't need a lock because it really is stopped.
1067 */
1068 if (thread->state & TH_HALTED0x10) {
1069 return KERN_SUCCESS0;
1070 }
1071
1072 /*
1073 * If the thread is at a nice continuation,
1074 * or a continuation with a cleanup routine,
1075 * call the cleanup routine.
1076 */
1077 if ((((thread->swap_func == mach_msg_continue) ||
1078 (thread->swap_func == mach_msg_receive_continue)) &&
1079 mach_msg_interrupt(thread)) ||
1080 (thread->swap_func == thread_exception_return) ||
1081 (thread->swap_func == thread_bootstrap_return)) {
1082 s = splsched();
1083 thread_lock(thread);
1084 thread->state |= TH_HALTED0x10;
1085 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1086 thread_unlock(thread);
1087 splx(s);
1088
1089 return KERN_SUCCESS0;
1090 }
1091
1092 /*
1093 * Force the thread to stop at a clean
1094 * point, and arrange to wait for it.
1095 *
1096 * Set it running, so it can notice. Override
1097 * the suspend count. We know that the thread
1098 * is suspended and not waiting.
1099 *
1100 * Since the thread may hit an interruptible wait
1101 * before it reaches a clean point, we must force it
1102 * to wake us up when it does so. This involves some
1103 * trickery:
1104 * We mark the thread SUSPENDED so that thread_block
1105 * will suspend it and wake us up.
1106 * We mark the thread RUNNING so that it will run.
1107 * We mark the thread UN-INTERRUPTIBLE (!) so that
1108 * some other thread trying to halt or suspend it won't
1109 * take it off the run queue before it runs. Since
1110 * dispatching a thread (the tail of thread_invoke) marks
1111 * the thread interruptible, it will stop at the next
1112 * context switch or interruptible wait.
1113 */
1114
1115 s = splsched();
1116 thread_lock(thread);
1117 if ((thread->state & TH_SCHED_STATE(0x01|0x02|0x04|0x08)) != TH_SUSP0x02)
1118 panic("thread_halt");
1119 thread->state |= TH_RUN0x04 | TH_UNINT0x08;
1120 thread_setrun(thread, FALSE((boolean_t) 0));
1121
1122 /*
1123 * Continue loop and wait for thread to stop.
1124 */
1125 }
1126}
1127
1128void walking_zombie(void)
1129{
1130 panic("the zombie walks!");
1131}
1132
1133/*
1134 * Thread calls this routine on exit from the kernel when it
1135 * notices a halt request.
1136 */
1137void thread_halt_self(void)
1138{
1139 thread_t thread = current_thread()(active_threads[(0)]);
1140 spl_t s;
1141
1142 if (thread->ast & AST_TERMINATE0x2) {
1143 /*
1144 * Thread is terminating itself. Shut
1145 * down IPC, then queue it up for the
1146 * reaper thread.
1147 */
1148 ipc_thread_terminate(thread);
1149
1150 thread_hold(thread);
1151
1152 s = splsched();
1153 simple_lock(&reaper_lock);
1154 enqueue_tail(&reaper_queue, (queue_entry_t) thread);
1155 simple_unlock(&reaper_lock);
1156
1157 thread_lock(thread);
1158 thread->state |= TH_HALTED0x10;
1159 thread_unlock(thread);
1160 (void) splx(s);
1161
1162 thread_wakeup((event_t)&reaper_queue);
1163 counter(c_thread_halt_self_block++);
1164 thread_block(walking_zombie);
1165 /*NOTREACHED*/
1166 } else {
1167 /*
1168 * Thread was asked to halt - show that it
1169 * has done so.
1170 */
1171 s = splsched();
1172 thread_lock(thread);
1173 thread->state |= TH_HALTED0x10;
1174 thread_ast_clear(thread, AST_HALT)(thread)->ast &= ~(0x1);
1175 thread_unlock(thread);
1176 splx(s);
1177 counter(c_thread_halt_self_block++);
1178 thread_block(thread_exception_return);
1179 /*
1180 * thread_release resets TH_HALTED.
1181 */
1182 }
1183}
1184
1185/*
1186 * thread_hold:
1187 *
1188 * Suspend execution of the specified thread.
1189 * This is a recursive-style suspension of the thread, a count of
1190 * suspends is maintained.
1191 */
1192void thread_hold(
1193 thread_t thread)
1194{
1195 spl_t s;
1196
1197 s = splsched();
1198 thread_lock(thread);
1199 thread->suspend_count++;
1200 thread->state |= TH_SUSP0x02;
1201 thread_unlock(thread);
1202 (void) splx(s);
1203}
1204
1205/*
1206 * thread_dowait:
1207 *
1208 * Wait for a thread to actually enter stopped state.
1209 *
1210 * must_halt argument indicates if this may fail on interruption.
1211 * This is FALSE only if called from thread_abort via thread_halt.
1212 */
1213kern_return_t
1214thread_dowait(
1215 thread_t thread,
1216 boolean_t must_halt)
1217{
1218 boolean_t need_wakeup;
1219 kern_return_t ret = KERN_SUCCESS0;
1220 spl_t s;
1221
1222 if (thread == current_thread()(active_threads[(0)]))
1223 panic("thread_dowait");
1224
1225 /*
1226 * If a thread is not interruptible, it may not be suspended
1227 * until it becomes interruptible. In this case, we wait for
1228 * the thread to stop itself, and indicate that we are waiting
1229 * for it to stop so that it can wake us up when it does stop.
1230 *
1231 * If the thread is interruptible, we may be able to suspend
1232 * it immediately. There are several cases:
1233 *
1234 * 1) The thread is already stopped (trivial)
1235 * 2) The thread is runnable (marked RUN and on a run queue).
1236 * We pull it off the run queue and mark it stopped.
1237 * 3) The thread is running. We wait for it to stop.
1238 */
1239
1240 need_wakeup = FALSE((boolean_t) 0);
1241 s = splsched();
1242 thread_lock(thread);
1243
1244 for (;;) {
1245 switch (thread->state & TH_SCHED_STATE(0x01|0x02|0x04|0x08)) {
1246 case TH_SUSP0x02:
1247 case TH_WAIT0x01 | TH_SUSP0x02:
1248 /*
1249 * Thread is already suspended, or sleeping in an
1250 * interruptible wait. We win!
1251 */
1252 break;
1253
1254 case TH_RUN0x04 | TH_SUSP0x02:
1255 /*
1256 * The thread is interruptible. If we can pull
1257 * it off a runq, stop it here.
1258 */
1259 if (rem_runq(thread) != RUN_QUEUE_NULL((run_queue_t) 0)) {
1260 thread->state &= ~TH_RUN0x04;
1261 need_wakeup = thread->wake_active;
1262 thread->wake_active = FALSE((boolean_t) 0);
1263 break;
1264 }
1265#if NCPUS1 > 1
1266 /*
1267 * The thread must be running, so make its
1268 * processor execute ast_check(). This
1269 * should cause the thread to take an ast and
1270 * context switch to suspend for us.
1271 */
1272 cause_ast_check(thread->last_processor);
1273#endif /* NCPUS > 1 */
1274
1275 /*
1276 * Fall through to wait for thread to stop.
1277 */
1278
1279 case TH_RUN0x04 | TH_SUSP0x02 | TH_UNINT0x08:
1280 case TH_RUN0x04 | TH_WAIT0x01 | TH_SUSP0x02:
1281 case TH_RUN0x04 | TH_WAIT0x01 | TH_SUSP0x02 | TH_UNINT0x08:
1282 case TH_WAIT0x01 | TH_SUSP0x02 | TH_UNINT0x08:
1283 /*
1284 * Wait for the thread to stop, or sleep interruptibly
1285 * (thread_block will stop it in the latter case).
1286 * Check for failure if interrupted.
1287 */
1288 thread->wake_active = TRUE((boolean_t) 1);
1289 thread_sleep((event_t) &thread->wake_active,
1290 simple_lock_addr(thread->lock)((simple_lock_t)0), TRUE((boolean_t) 1));
1291 thread_lock(thread);
1292 if ((current_thread()(active_threads[(0)])->wait_result != THREAD_AWAKENED0) &&
1293 !must_halt) {
1294 ret = KERN_FAILURE5;
1295 break;
1296 }
1297
1298 /*
1299 * Repeat loop to check thread`s state.
1300 */
1301 continue;
1302 }
1303 /*
1304 * Thread is stopped at this point.
1305 */
1306 break;
1307 }
1308
1309 thread_unlock(thread);
1310 (void) splx(s);
1311
1312 if (need_wakeup)
1313 thread_wakeup((event_t) &thread->wake_active);
1314
1315 return ret;
1316}
1317
1318void thread_release(
1319 thread_t thread)
1320{
1321 spl_t s;
1322
1323 s = splsched();
1324 thread_lock(thread);
1325 if (--thread->suspend_count == 0) {
1326 thread->state &= ~(TH_SUSP0x02 | TH_HALTED0x10);
1327 if ((thread->state & (TH_WAIT0x01 | TH_RUN0x04)) == 0) {
1328 /* was only suspended */
1329 thread->state |= TH_RUN0x04;
1330 thread_setrun(thread, TRUE((boolean_t) 1));
1331 }
1332 }
1333 thread_unlock(thread);
1334 (void) splx(s);
1335}
1336
1337kern_return_t thread_suspend(
1338 thread_t thread)
1339{
1340 boolean_t hold;
1341 spl_t spl;
1342
1343 if (thread == THREAD_NULL((thread_t) 0))
1344 return KERN_INVALID_ARGUMENT4;
1345
1346 hold = FALSE((boolean_t) 0);
1347 spl = splsched();
1348 thread_lock(thread);
1349 /* Wait for thread to get interruptible */
1350 while (thread->state & TH_UNINT0x08) {
1351 assert_wait(&thread->state, TRUE((boolean_t) 1));
1352 thread_unlock(thread);
1353 thread_block(NULL((void *) 0));
1354 thread_lock(thread);
1355 }
1356 if (thread->user_stop_count++ == 0) {
1357 hold = TRUE((boolean_t) 1);
1358 thread->suspend_count++;
1359 thread->state |= TH_SUSP0x02;
1360 }
1361 thread_unlock(thread);
1362 (void) splx(spl);
1363
1364 /*
1365 * Now wait for the thread if necessary.
1366 */
1367 if (hold) {
1368 if (thread == current_thread()(active_threads[(0)])) {
1369 /*
1370 * We want to call thread_block on our way out,
1371 * to stop running.
1372 */
1373 spl = splsched();
1374 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
1375 (void) splx(spl);
1376 } else
1377 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1378 }
1379 return KERN_SUCCESS0;
1380}
1381
1382
1383kern_return_t thread_resume(
1384 thread_t thread)
1385{
1386 kern_return_t ret;
1387 spl_t s;
1388
1389 if (thread == THREAD_NULL((thread_t) 0))
1390 return KERN_INVALID_ARGUMENT4;
1391
1392 ret = KERN_SUCCESS0;
1393
1394 s = splsched();
1395 thread_lock(thread);
1396 if (thread->user_stop_count > 0) {
1397 if (--thread->user_stop_count == 0) {
1398 if (--thread->suspend_count == 0) {
1399 thread->state &= ~(TH_SUSP0x02 | TH_HALTED0x10);
1400 if ((thread->state & (TH_WAIT0x01 | TH_RUN0x04)) == 0) {
1401 /* was only suspended */
1402 thread->state |= TH_RUN0x04;
1403 thread_setrun(thread, TRUE((boolean_t) 1));
1404 }
1405 }
1406 }
1407 }
1408 else {
1409 ret = KERN_FAILURE5;
1410 }
1411
1412 thread_unlock(thread);
1413 (void) splx(s);
1414
1415 return ret;
1416}
1417
1418/*
1419 * Return thread's machine-dependent state.
1420 */
1421kern_return_t thread_get_state(
1422 thread_t thread,
1423 int flavor,
1424 thread_state_t old_state, /* pointer to OUT array */
1425 natural_t *old_state_count) /*IN/OUT*/
1426{
1427 kern_return_t ret;
1428
1429 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1430 return KERN_INVALID_ARGUMENT4;
1431 }
1432
1433 thread_hold(thread);
1434 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1435
1436 ret = thread_getstatus(thread, flavor, old_state, old_state_count);
1437
1438 thread_release(thread);
1439 return ret;
1440}
1441
1442/*
1443 * Change thread's machine-dependent state.
1444 */
1445kern_return_t thread_set_state(
1446 thread_t thread,
1447 int flavor,
1448 thread_state_t new_state,
1449 natural_t new_state_count)
1450{
1451 kern_return_t ret;
1452
1453 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1454 return KERN_INVALID_ARGUMENT4;
1455 }
1456
1457 thread_hold(thread);
1458 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1459
1460 ret = thread_setstatus(thread, flavor, new_state, new_state_count);
1461
1462 thread_release(thread);
1463 return ret;
1464}
1465
1466kern_return_t thread_info(
1467 thread_t thread,
1468 int flavor,
1469 thread_info_t thread_info_out, /* pointer to OUT array */
1470 natural_t *thread_info_count) /*IN/OUT*/
1471{
1472 int state, flags;
1473 spl_t s;
1474
1475 if (thread == THREAD_NULL((thread_t) 0))
1476 return KERN_INVALID_ARGUMENT4;
1477
1478 if (flavor == THREAD_BASIC_INFO1) {
1479 thread_basic_info_t basic_info;
1480
1481 /* Allow *thread_info_count to be one smaller than the
1482 usual amount, because creation_time is a new member
1483 that some callers might not know about. */
1484
1485 if (*thread_info_count < THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t)) - 1) {
1486 return KERN_INVALID_ARGUMENT4;
1487 }
1488
1489 basic_info = (thread_basic_info_t) thread_info_out;
1490
1491 s = splsched();
1492 thread_lock(thread);
1493
1494 /*
1495 * Update lazy-evaluated scheduler info because someone wants it.
1496 */
1497 if ((thread->state & TH_RUN0x04) == 0 &&
1498 thread->sched_stamp != sched_tick)
1499 update_priority(thread);
1500
1501 /* fill in info */
1502
1503 thread_read_times(thread,
1504 &basic_info->user_time,
1505 &basic_info->system_time);
1506 basic_info->base_priority = thread->priority;
1507 basic_info->cur_priority = thread->sched_pri;
1508 basic_info->creation_time = thread->creation_time;
1509
1510 /*
1511 * To calculate cpu_usage, first correct for timer rate,
1512 * then for 5/8 ageing. The correction factor [3/5] is
1513 * (1/(5/8) - 1).
1514 */
1515 basic_info->cpu_usage = thread->cpu_usage /
1516 (TIMER_RATE1000000/TH_USAGE_SCALE1000);
1517 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1518#if SIMPLE_CLOCK0
1519 /*
1520 * Clock drift compensation.
1521 */
1522 basic_info->cpu_usage =
1523 (basic_info->cpu_usage * 1000000)/sched_usec;
1524#endif /* SIMPLE_CLOCK */
1525
1526 flags = 0;
1527 if (thread->state & TH_SWAPPED0x0100)
1528 flags |= TH_FLAGS_SWAPPED0x1;
1529 if (thread->state & TH_IDLE0x80)
1530 flags |= TH_FLAGS_IDLE0x2;
1531
1532 if (thread->state & TH_HALTED0x10)
1533 state = TH_STATE_HALTED5;
1534 else
1535 if (thread->state & TH_RUN0x04)
1536 state = TH_STATE_RUNNING1;
1537 else
1538 if (thread->state & TH_UNINT0x08)
1539 state = TH_STATE_UNINTERRUPTIBLE4;
1540 else
1541 if (thread->state & TH_SUSP0x02)
1542 state = TH_STATE_STOPPED2;
1543 else
1544 if (thread->state & TH_WAIT0x01)
1545 state = TH_STATE_WAITING3;
1546 else
1547 state = 0; /* ? */
1548
1549 basic_info->run_state = state;
1550 basic_info->flags = flags;
1551 basic_info->suspend_count = thread->user_stop_count;
1552 if (state == TH_STATE_RUNNING1)
1553 basic_info->sleep_time = 0;
1554 else
1555 basic_info->sleep_time = sched_tick - thread->sched_stamp;
1556
1557 thread_unlock(thread);
1558 splx(s);
1559
1560 if (*thread_info_count > THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t)))
1561 *thread_info_count = THREAD_BASIC_INFO_COUNT(sizeof(thread_basic_info_data_t) / sizeof(natural_t));
1562 return KERN_SUCCESS0;
1563 }
1564 else if (flavor == THREAD_SCHED_INFO2) {
1565 thread_sched_info_t sched_info;
1566
1567 if (*thread_info_count < THREAD_SCHED_INFO_COUNT(sizeof(thread_sched_info_data_t) / sizeof(natural_t))) {
1568 return KERN_INVALID_ARGUMENT4;
1569 }
1570
1571 sched_info = (thread_sched_info_t) thread_info_out;
1572
1573 s = splsched();
1574 thread_lock(thread);
1575
1576#if MACH_FIXPRI1
1577 sched_info->policy = thread->policy;
1578 if (thread->policy == POLICY_FIXEDPRI2) {
1579 sched_info->data = (thread->sched_data * tick)/1000;
1580 }
1581 else {
1582 sched_info->data = 0;
1583 }
1584#else /* MACH_FIXPRI */
1585 sched_info->policy = POLICY_TIMESHARE1;
1586 sched_info->data = 0;
1587#endif /* MACH_FIXPRI */
1588
1589 sched_info->base_priority = thread->priority;
1590 sched_info->max_priority = thread->max_priority;
1591 sched_info->cur_priority = thread->sched_pri;
1592
1593 sched_info->depressed = (thread->depress_priority >= 0);
1594 sched_info->depress_priority = thread->depress_priority;
1595
1596 thread_unlock(thread);
1597 splx(s);
1598
1599 *thread_info_count = THREAD_SCHED_INFO_COUNT(sizeof(thread_sched_info_data_t) / sizeof(natural_t));
1600 return KERN_SUCCESS0;
1601 }
1602
1603 return KERN_INVALID_ARGUMENT4;
1604}
1605
1606kern_return_t thread_abort(
1607 thread_t thread)
1608{
1609 if (thread == THREAD_NULL((thread_t) 0) || thread == current_thread()(active_threads[(0)])) {
1610 return KERN_INVALID_ARGUMENT4;
1611 }
1612
1613 /*
1614 *
1615 * clear it of an event wait
1616 */
1617 evc_notify_abort(thread);
1618
1619 /*
1620 * Try to force the thread to a clean point
1621 * If the halt operation fails return KERN_ABORTED.
1622 * ipc code will convert this to an ipc interrupted error code.
1623 */
1624 if (thread_halt(thread, FALSE((boolean_t) 0)) != KERN_SUCCESS0)
1625 return KERN_ABORTED14;
1626
1627 /*
1628 * If the thread was in an exception, abort that too.
1629 */
1630 mach_msg_abort_rpc(thread);
1631
1632 /*
1633 * Then set it going again.
1634 */
1635 thread_release(thread);
1636
1637 /*
1638 * Also abort any depression.
1639 */
1640 if (thread->depress_priority != -1)
1641 thread_depress_abort(thread);
1642
1643 return KERN_SUCCESS0;
1644}
1645
1646/*
1647 * thread_start:
1648 *
1649 * Start a thread at the specified routine.
1650 * The thread must be in a swapped state.
1651 */
1652
1653void
1654thread_start(
1655 thread_t thread,
1656 continuation_t start)
1657{
1658 thread->swap_func = start;
1659}
1660
1661/*
1662 * kernel_thread:
1663 *
1664 * Start up a kernel thread in the specified task.
1665 */
1666
1667thread_t kernel_thread(
1668 task_t task,
1669 continuation_t start,
1670 void * arg)
1671{
1672 thread_t thread;
1
Variable 'thread' declared without an initial value
1673
1674 (void) thread_create(task, &thread);
2
Calling 'thread_create'
25
Returning from 'thread_create'
1675 /* release "extra" ref that thread_create gave us */
1676 thread_deallocate(thread);
26
Function call argument is an uninitialized value
1677 thread_start(thread, start);
1678 thread->ith_othersaved.other = arg;
1679
1680 /*
1681 * We ensure that the kernel thread starts with a stack.
1682 * The swapin mechanism might not be operational yet.
1683 */
1684 thread_doswapin(thread);
1685 thread->max_priority = BASEPRI_SYSTEM6;
1686 thread->priority = BASEPRI_SYSTEM6;
1687 thread->sched_pri = BASEPRI_SYSTEM6;
1688 (void) thread_resume(thread);
1689 return thread;
1690}
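
Since thread_create() leaves its out-parameter untouched on failure, the uninitialized argument reported at line 1676 disappears once kernel_thread() checks the status it currently discards with a (void) cast. One possible shape, written against the declarations in this file (the THREAD_NULL early return is an assumption about what callers of kernel_thread() can tolerate, not something the report establishes):

	/* Sketch: guard kernel_thread() against thread_create() failing. */
	thread_t kernel_thread(
		task_t		task,
		continuation_t	start,
		void *		arg)
	{
		thread_t	thread;

		if (thread_create(task, &thread) != KERN_SUCCESS)
			return THREAD_NULL;	/* assumed contract: callers check for THREAD_NULL */

		/* release "extra" ref that thread_create gave us */
		thread_deallocate(thread);
		thread_start(thread, start);
		thread->ith_othersaved.other = arg;

		/*
		 * We ensure that the kernel thread starts with a stack.
		 * The swapin mechanism might not be operational yet.
		 */
		thread_doswapin(thread);
		thread->max_priority = BASEPRI_SYSTEM;
		thread->priority = BASEPRI_SYSTEM;
		thread->sched_pri = BASEPRI_SYSTEM;
		(void) thread_resume(thread);
		return thread;
	}
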
1691
1692/*
1693 * reaper_thread:
1694 *
1695 * This kernel thread runs forever looking for threads to destroy
1696 * (when they request that they be destroyed, of course).
1697 */
1698void reaper_thread_continue(void)
1699{
1700 for (;;) {
1701 thread_t thread;
1702 spl_t s;
1703
1704 s = splsched();
1705 simple_lock(&reaper_lock);
1706
1707 while ((thread = (thread_t) dequeue_head(&reaper_queue))
1708 != THREAD_NULL((thread_t) 0)) {
1709 simple_unlock(&reaper_lock);
1710 (void) splx(s);
1711
1712 (void) thread_dowait(thread, TRUE((boolean_t) 1)); /* may block */
1713 thread_deallocate(thread); /* may block */
1714
1715 s = splsched();
1716 simple_lock(&reaper_lock);
1717 }
1718
1719 assert_wait((event_t) &reaper_queue, FALSE((boolean_t) 0));
1720 simple_unlock(&reaper_lock);
1721 (void) splx(s);
1722 counter(c_reaper_thread_block++);
1723 thread_block(reaper_thread_continue);
1724 }
1725}
1726
1727void reaper_thread(void)
1728{
1729 reaper_thread_continue();
1730 /*NOTREACHED*/
1731}
1732
1733#if MACH_HOST0
1734/*
1735 * thread_assign:
1736 *
1737 * Change processor set assignment.
1738 * Caller must hold an extra reference to the thread (if this is
1739 * called directly from the ipc interface, this is an operation
1740 * in progress reference). Caller must hold no locks -- this may block.
1741 */
1742
1743kern_return_t
1744thread_assign(
1745 thread_t thread,
1746 processor_set_t new_pset)
1747{
1748 if (thread == THREAD_NULL((thread_t) 0) || new_pset == PROCESSOR_SET_NULL((processor_set_t) 0)) {
1749 return KERN_INVALID_ARGUMENT4;
1750 }
1751
1752 thread_freeze(thread);
1753 thread_doassign(thread, new_pset, TRUE((boolean_t) 1));
1754
1755 return KERN_SUCCESS0;
1756}
1757
1758/*
1759 * thread_freeze:
1760 *
1761 * Freeze thread's assignment. Prelude to assigning thread.
1762 * Only one freeze may be held per thread.
1763 */
1764void
1765thread_freeze(
1766 thread_t thread)
1767{
1768 spl_t s;
1769 /*
1770 * Freeze the assignment, deferring to a prior freeze.
1771 */
1772 s = splsched();
1773 thread_lock(thread);
1774 while (thread->may_assign == FALSE((boolean_t) 0)) {
1775 thread->assign_active = TRUE((boolean_t) 1);
1776 thread_sleep((event_t) &thread->assign_active,
1777 simple_lock_addr(thread->lock)((simple_lock_t)0), FALSE((boolean_t) 0));
1778 thread_lock(thread);
1779 }
1780 thread->may_assign = FALSE((boolean_t) 0);
1781 thread_unlock(thread);
1782 (void) splx(s);
1783
1784}
1785
1786/*
1787 * thread_unfreeze: release freeze on thread's assignment.
1788 */
1789void
1790thread_unfreeze(
1791 thread_t thread)
1792{
1793 spl_t s;
1794
1795 s = splsched();
1796 thread_lock(thread);
1797 thread->may_assign = TRUE((boolean_t) 1);
1798 if (thread->assign_active) {
1799 thread->assign_active = FALSE((boolean_t) 0);
1800 thread_wakeup((event_t)&thread->assign_active);
1801 }
1802 thread_unlock(thread);
1803 splx(s);
1804}
1805
1806/*
1807 * thread_doassign:
1808 *
1809 * Actually do thread assignment. thread_will_assign must have been
1810 * called on the thread. release_freeze argument indicates whether
1811 * to release freeze on thread.
1812 */
1813
1814void
1815thread_doassign(
1816 thread_t thread,
1817 processor_set_t new_pset,
1818 boolean_t release_freeze)
1819{
1820 processor_set_t pset;
1821 boolean_t old_empty, new_empty;
1822 boolean_t recompute_pri = FALSE((boolean_t) 0);
1823 spl_t s;
1824
1825 /*
1826 * Check for silly no-op.
1827 */
1828 pset = thread->processor_set;
1829 if (pset == new_pset) {
1830 if (release_freeze)
1831 thread_unfreeze(thread);
1832 return;
1833 }
1834 /*
1835 * Suspend the thread and stop it if it's not the current thread.
1836 */
1837 thread_hold(thread);
1838 if (thread != current_thread()(active_threads[(0)]))
1839 (void) thread_dowait(thread, TRUE((boolean_t) 1));
1840
1841 /*
1842 * Lock both psets now, use ordering to avoid deadlocks.
1843 */
1844Restart:
1845 if ((vm_offset_t)pset < (vm_offset_t)new_pset) {
1846 pset_lock(pset);
1847 pset_lock(new_pset);
1848 }
1849 else {
1850 pset_lock(new_pset);
1851 pset_lock(pset);
1852 }
1853
1854 /*
1855 * Check if new_pset is ok to assign to. If not, reassign
1856 * to default_pset.
1857 */
1858 if (!new_pset->active) {
1859 pset_unlock(pset);
1860 pset_unlock(new_pset);
1861 new_pset = &default_pset;
1862 goto Restart;
1863 }
1864
1865 pset_reference(new_pset);
1866
1867 /*
1868 * Grab the thread lock and move the thread.
1869 * Then drop the lock on the old pset and the thread's
1870 * reference to it.
1871 */
1872 s = splsched();
1873 thread_lock(thread);
1874
1875 thread_change_psets(thread, pset, new_pset);
1876
1877 old_empty = pset->empty;
1878 new_empty = new_pset->empty;
1879
1880 pset_unlock(pset);
1881
1882 /*
1883 * Reset policy and priorities if needed.
1884 */
1885#if MACH_FIXPRI1
1886 if (thread->policy & new_pset->policies == 0) {
1887 thread->policy = POLICY_TIMESHARE1;
1888 recompute_pri = TRUE((boolean_t) 1);
1889 }
1890#endif /* MACH_FIXPRI */
1891
1892 if (thread->max_priority < new_pset->max_priority) {
1893 thread->max_priority = new_pset->max_priority;
1894 if (thread->priority < thread->max_priority) {
1895 thread->priority = thread->max_priority;
1896 recompute_pri = TRUE((boolean_t) 1);
1897 }
1898 else {
1899 if ((thread->depress_priority >= 0) &&
1900 (thread->depress_priority < thread->max_priority)) {
1901 thread->depress_priority = thread->max_priority;
1902 }
1903 }
1904 }
1905
1906 pset_unlock(new_pset);
1907
1908 if (recompute_pri)
1909 compute_priority(thread, TRUE((boolean_t) 1));
1910
1911 if (release_freeze) {
1912 thread->may_assign = TRUE((boolean_t) 1);
1913 if (thread->assign_active) {
1914 thread->assign_active = FALSE((boolean_t) 0);
1915 thread_wakeup((event_t)&thread->assign_active);
1916 }
1917 }
1918
1919 thread_unlock(thread);
1920 splx(s);
1921
1922 pset_deallocate(pset);
1923
1924 /*
1925 * Figure out hold status of thread. Threads assigned to empty
1926 * psets must be held. Therefore:
1927 * If old pset was empty release its hold.
1928 * Release our hold from above unless new pset is empty.
1929 */
1930
1931 if (old_empty)
1932 thread_release(thread);
1933 if (!new_empty)
1934 thread_release(thread);
1935
1936 /*
1937 * If current_thread is assigned, context switch to force
1938 * assignment to happen. This also causes hold to take
1939 * effect if the new pset is empty.
1940 */
1941 if (thread == current_thread()(active_threads[(0)])) {
1942 s = splsched();
1943 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
1944 (void) splx(s);
1945 }
1946}
1947#else /* MACH_HOST */
1948kern_return_t
1949thread_assign(
1950 thread_t thread,
1951 processor_set_t new_pset)
1952{
1953 return KERN_FAILURE5;
1954}
1955#endif /* MACH_HOST */
1956
1957/*
1958 * thread_assign_default:
1959 *
1960 * Special version of thread_assign for assigning threads to default
1961 * processor set.
1962 */
1963kern_return_t
1964thread_assign_default(
1965 thread_t thread)
1966{
1967 return thread_assign(thread, &default_pset);
1968}
1969
1970/*
1971 * thread_get_assignment
1972 *
1973 * Return current assignment for this thread.
1974 */
1975kern_return_t thread_get_assignment(
1976 thread_t thread,
1977 processor_set_t *pset)
1978{
1979 *pset = thread->processor_set;
1980 pset_reference(*pset);
1981 return KERN_SUCCESS0;
1982}
1983
1984/*
1985 * thread_priority:
1986 *
1987 * Set priority (and possibly max priority) for thread.
1988 */
1989kern_return_t
1990thread_priority(
1991 thread_t thread,
1992 int priority,
1993 boolean_t set_max)
1994{
1995 spl_t s;
1996 kern_return_t ret = KERN_SUCCESS0;
1997
1998 if ((thread == THREAD_NULL((thread_t) 0)) || invalid_pri(priority)(((priority) < 0) || ((priority) >= 50)))
1999 return KERN_INVALID_ARGUMENT4;
2000
2001 s = splsched();
2002 thread_lock(thread);
2003
2004 /*
2005 * Check for violation of max priority
2006 */
2007 if (priority < thread->max_priority) {
2008 ret = KERN_FAILURE5;
2009 }
2010 else {
2011 /*
2012 * Set priorities. If a depression is in progress,
2013 * change the priority to restore.
2014 */
2015 if (thread->depress_priority >= 0) {
2016 thread->depress_priority = priority;
2017 }
2018 else {
2019 thread->priority = priority;
2020 compute_priority(thread, TRUE((boolean_t) 1));
2021 }
2022
2023 if (set_max)
2024 thread->max_priority = priority;
2025 }
2026 thread_unlock(thread);
2027 (void) splx(s);
2028
2029 return ret;
2030}
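One detail worth spelling out: Mach priorities are numerically inverted, with smaller values being more favorable, so the priority < thread->max_priority test above rejects requests for a priority better than the thread's maximum. A stand-alone sketch of that validity check; the constant mirrors the 0..49 range enforced by invalid_pri, and all names here are illustrative:

#include <stdio.h>

#define EXAMPLE_NRQS 50		/* run-queue slots; invalid_pri() checks [0, 50) */

/* Returns 1 when a requested priority is acceptable for a thread
 * whose most favorable allowed value is max_priority.  Smaller
 * numbers are more favorable, exactly as in thread_priority(). */
static int priority_request_ok(int requested, int max_priority)
{
	if (requested < 0 || requested >= EXAMPLE_NRQS)
		return 0;			/* KERN_INVALID_ARGUMENT */
	return requested >= max_priority;	/* below max => KERN_FAILURE */
}

int main(void)
{
	printf("%d\n", priority_request_ok(12, 25));	/* 0: more favorable than allowed */
	printf("%d\n", priority_request_ok(30, 25));	/* 1: allowed */
	return 0;
}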
2031
2032/*
2033 * thread_set_own_priority:
2034 *
2035 * Internal use only; sets the priority of the calling thread.
2036 * Will adjust max_priority if necessary.
2037 */
2038void
2039thread_set_own_priority(
2040 int priority)
2041{
2042 spl_t s;
2043 thread_t thread = current_thread();
2044
2045 s = splsched();
2046 thread_lock(thread);
2047
2048 if (priority < thread->max_priority)
2049 thread->max_priority = priority;
2050 thread->priority = priority;
2051 compute_priority(thread, TRUE);
2052
2053 thread_unlock(thread);
2054 (void) splx(s);
2055}
2056
2057/*
2058 * thread_max_priority:
2059 *
2060 * Reset the max priority for a thread.
2061 */
2062kern_return_t
2063thread_max_priority(
2064 thread_t thread,
2065 processor_set_t pset,
2066 int max_priority)
2067{
2068 spl_t s;
2069 kern_return_t ret = KERN_SUCCESS;
2070
2071 if ((thread == THREAD_NULL) || (pset == PROCESSOR_SET_NULL) ||
2072 invalid_pri(max_priority))
2073 return KERN_INVALID_ARGUMENT;
2074
2075 s = splsched();
2076 thread_lock(thread);
2077
2078#if MACH_HOST
2079 /*
2080 * Check for wrong processor set.
2081 */
2082 if (pset != thread->processor_set) {
2083 ret = KERN_FAILURE;
2084 }
2085 else {
2086#endif /* MACH_HOST */
2087 thread->max_priority = max_priority;
2088
2089 /*
2090 * Reset priority if it violates new max priority
2091 */
2092 if (max_priority > thread->priority) {
2093 thread->priority = max_priority;
2094
2095 compute_priority(thread, TRUE);
2096 }
2097 else {
2098 if (thread->depress_priority >= 0 &&
2099 max_priority > thread->depress_priority)
2100 thread->depress_priority = max_priority;
2101 }
2102#if MACH_HOST
2103 }
2104#endif /* MACH_HOST */
2105
2106 thread_unlock(thread);
2107 (void) splx(s);
2108
2109 return ret;
2110}
2111
2112/*
2113 * thread_policy:
2114 *
2115 * Set scheduling policy for thread.
2116 */
2117kern_return_t
2118thread_policy(
2119 thread_t thread,
2120 int policy,
2121 int data)
2122{
2123#if MACH_FIXPRI
2124 kern_return_t ret = KERN_SUCCESS;
2125 int temp;
2126 spl_t s;
2127#endif /* MACH_FIXPRI */
2128
2129 if ((thread == THREAD_NULL) || invalid_policy(policy))
2130 return KERN_INVALID_ARGUMENT;
2131
2132#if MACH_FIXPRI
2133 s = splsched();
2134 thread_lock(thread);
2135
2136 /*
2137 * Check if changing policy.
2138 */
2139 if (policy == thread->policy) {
2140 /*
2141 * Just changing data. The data is meaningless for
2142 * timesharing; for fixed priority it is the quantum,
2143 * which takes effect only when the current quantum runs out.
2144 */
2145 if (policy == POLICY_FIXEDPRI) {
2146 temp = data * 1000;
2147 if (temp % tick)
2148 temp += tick;
2149 thread->sched_data = temp/tick;
2150 }
2151 }
2152 else {
2153 /*
2154 * Changing policy. Check if new policy is allowed.
2155 */
2156 if ((thread->processor_set->policies & policy) == 0) {
2157 ret = KERN_FAILURE;
2158 }
2159 else {
2160 /*
2161 * Changing policy. Save data and calculate new
2162 * priority.
2163 */
2164 thread->policy = policy;
2165 if (policy == POLICY_FIXEDPRI) {
2166 temp = data * 1000;
2167 if (temp % tick)
2168 temp += tick;
2169 thread->sched_data = temp/tick;
2170 }
2171 compute_priority(thread, TRUE);
2172 }
2173 }
2174 thread_unlock(thread);
2175 (void) splx(s);
2176
2177 return ret;
2178#else /* MACH_FIXPRI */
2179 if (policy == POLICY_TIMESHARE)
2180 return KERN_SUCCESS;
2181 else
2182 return KERN_FAILURE;
2183#endif /* MACH_FIXPRI */
2184}
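In the fixed-priority branches above, data is the caller's quantum in milliseconds and tick is the clock-tick length in microseconds; multiplying by 1000 and bumping a partial tick rounds the quantum up to a whole number of ticks. The same arithmetic in isolation, with a 10 ms tick assumed purely for illustration:

#include <stdio.h>

/* Sketch of the quantum conversion in thread_policy(): milliseconds
 * requested by the caller -> whole scheduler ticks, rounding up.
 * tick_usec is illustrative; in the kernel it is the global `tick'. */
static unsigned quantum_ms_to_ticks(unsigned ms, unsigned tick_usec)
{
	unsigned usec = ms * 1000;

	if (usec % tick_usec)
		usec += tick_usec;	/* round a partial tick up */
	return usec / tick_usec;
}

int main(void)
{
	/* With a 10 ms (10000 us) tick, 25 ms becomes 3 ticks. */
	printf("%u\n", quantum_ms_to_ticks(25, 10000));
	return 0;
}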
2185
2186/*
2187 * thread_wire:
2188 *
2189 * Specify that the target thread must always be able
2190 * to run and to allocate memory.
2191 */
2192kern_return_t
2193thread_wire(
2194 host_t host,
2195 thread_t thread,
2196 boolean_t wired)
2197{
2198 spl_t s;
2199
2200 if (host == HOST_NULL)
2201 return KERN_INVALID_ARGUMENT;
2202
2203 if (thread == THREAD_NULL)
2204 return KERN_INVALID_ARGUMENT;
2205
2206 /*
2207 * This implementation only works for the current thread.
2208 * See stack_privilege.
2209 */
2210 if (thread != current_thread())
2211 return KERN_INVALID_ARGUMENT;
2212
2213 s = splsched();
2214 thread_lock(thread);
2215
2216 if (wired) {
2217 thread->vm_privilege = TRUE;
2218 stack_privilege(thread);
2219 }
2220 else {
2221 thread->vm_privilege = FALSE;
2222/*XXX stack_unprivilege(thread); */
2223 thread->stack_privilege = 0;
2224 }
2225
2226 thread_unlock(thread);
2227 splx(s);
2228
2229 return KERN_SUCCESS;
2230}
2231
2232/*
2233 * thread_collect_scan:
2234 *
2235 * Attempt to free resources owned by threads.
2236 * pcb_collect doesn't do anything yet.
2237 */
2238
2239void thread_collect_scan(void)
2240{
2241#if 0
2242 register thread_t thread, prev_thread;
2243 processor_set_t pset, prev_pset;
2244
2245 prev_thread = THREAD_NULL;
2246 prev_pset = PROCESSOR_SET_NULL;
2247
2248 simple_lock(&all_psets_lock);
2249 queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
2250 pset_lock(pset);
2251 queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
2252 spl_t s = splsched();
2253 thread_lock(thread);
2254
2255 /*
2256 * Only collect threads which are
2257 * not runnable and are swapped.
2258 */
2259
2260 if ((thread->state & (TH_RUN|TH_SWAPPED))
2261 == TH_SWAPPED) {
2262 thread->ref_count++;
2263 thread_unlock(thread);
2264 (void) splx(s);
2265 pset->ref_count++;
2266 pset_unlock(pset);
2267 simple_unlock(&all_psets_lock);
2268
2269 pcb_collect(thread);
2270
2271 if (prev_thread != THREAD_NULL)
2272 thread_deallocate(prev_thread);
2273 prev_thread = thread;
2274
2275 if (prev_pset != PROCESSOR_SET_NULL)
2276 pset_deallocate(prev_pset);
2277 prev_pset = pset;
2278
2279 simple_lock(&all_psets_lock);
2280 pset_lock(pset);
2281 } else {
2282 thread_unlock(thread);
2283 (void) splx(s);
2284 }
2285 }
2286 pset_unlock(pset);
2287 }
2288 simple_unlock(&all_psets_lock);
2289
2290 if (prev_thread != THREAD_NULL)
2291 thread_deallocate(prev_thread);
2292 if (prev_pset != PROCESSOR_SET_NULL)
2293 pset_deallocate(prev_pset);
2294#endif /* 0 */
2295}
2296
2297 boolean_t thread_collect_allowed = TRUE;
2298unsigned thread_collect_last_tick = 0;
2299unsigned thread_collect_max_rate = 0; /* in ticks */
2300
2301/*
2302 * consider_thread_collect:
2303 *
2304 * Called by the pageout daemon when the system needs more free pages.
2305 */
2306
2307void consider_thread_collect(void)
2308{
2309 /*
2310 * By default, don't attempt thread collection more frequently
2311 * than once a second.
2312 */
2313
2314 if (thread_collect_max_rate == 0)
2315 thread_collect_max_rate = hz;
2316
2317 if (thread_collect_allowed &&
2318 (sched_tick >
2319 (thread_collect_last_tick + thread_collect_max_rate))) {
2320 thread_collect_last_tick = sched_tick;
2321 thread_collect_scan();
2322 }
2323}
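consider_thread_collect is a simple throttle: the scan runs only when more than thread_collect_max_rate units of sched_tick have passed since the last run, and the default rate is hz, which the comment above equates with roughly once a second. The pattern, extracted into a stand-alone form with illustrative names:

#include <stdio.h>

struct throttle {
	unsigned last_tick;	/* tick of the last run */
	unsigned min_interval;	/* minimum ticks between runs */
};

/* Same shape as the check in consider_thread_collect(). */
static int throttle_should_run(struct throttle *t, unsigned now)
{
	if (now > t->last_tick + t->min_interval) {
		t->last_tick = now;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct throttle t = { 0, 5 };
	unsigned tick;

	/* Prints "run" at ticks 6, 12 and 18. */
	for (tick = 1; tick <= 20; tick++)
		if (throttle_should_run(&t, tick))
			printf("run at tick %u\n", tick);
	return 0;
}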
2324
2325#if MACH_DEBUG
2326
2327vm_size_t stack_usage(
2328 vm_offset_t stack)
2329{
2330 int i;
2331
2332 for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
2333 if (((unsigned int *)stack)[i] != STACK_MARKER)
2334 break;
2335
2336 return KERNEL_STACK_SIZE - i * sizeof(unsigned int);
2337}
2338
2339/*
2340 * Machine-dependent code should call stack_init
2341 * before doing its own initialization of the stack.
2342 */
2343
2344void stack_init(
2345 vm_offset_t stack)
2346{
2347 if (stack_check_usage) {
2348 int i;
2349
2350 for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
2351 ((unsigned int *)stack)[i] = STACK_MARKER;
2352 }
2353}
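stack_init and stack_usage together implement a high-water-mark probe: the stack is pre-filled with STACK_MARKER, and because kernel stacks grow downward, the run of untouched marker words at the low end shows how much of the stack was never used. A user-space sketch of the same technique on an ordinary buffer; sizes and names are illustrative:

#include <stdio.h>
#include <stddef.h>

#define MARKER 0xdeadbeefU
#define WORDS  1024			/* illustrative "stack" size */

static unsigned int fake_stack[WORDS];

static void mark_stack(void)
{
	size_t i;

	for (i = 0; i < WORDS; i++)
		fake_stack[i] = MARKER;
}

/* Scan from the low end for the first overwritten word; everything
 * above it counts as "used", mirroring stack_usage() above. */
static size_t stack_bytes_used(void)
{
	size_t i;

	for (i = 0; i < WORDS; i++)
		if (fake_stack[i] != MARKER)
			break;
	return (WORDS - i) * sizeof(unsigned int);
}

int main(void)
{
	mark_stack();
	fake_stack[WORDS - 10] = 0;	/* pretend the top 10 words were used */
	printf("%zu bytes used\n", stack_bytes_used());
	return 0;
}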
2354
2355/*
2356 * Machine-dependent code should call stack_finalize
2357 * before releasing the stack memory.
2358 */
2359
2360void stack_finalize(
2361 vm_offset_t stack)
2362{
2363 if (stack_check_usage) {
2364 vm_size_t used = stack_usage(stack);
2365
2366 simple_lock(&stack_usage_lock);
2367 if (used > stack_max_usage)
2368 stack_max_usage = used;
2369 simple_unlock(&stack_usage_lock);
2370 }
2371}
2372
2373#ifndef MACHINE_STACK
2374/*
2375 * stack_statistics:
2376 *
2377 * Return statistics on cached kernel stacks.
2378 * *maxusagep must be initialized by the caller.
2379 */
2380
2381void stack_statistics(
2382 natural_t *totalp,
2383 vm_size_t *maxusagep)
2384{
2385 spl_t s;
2386
2387 s = splsched();
2388 stack_lock();
2389 if (stack_check_usage) {
2390 vm_offset_t stack;
2391
2392 /*
2393 * This is pretty expensive to do at splsched,
2394 * but it only happens when someone makes
2395 * a debugging call, so it should be OK.
2396 */
2397
2398 for (stack = stack_free_list; stack != 0;
2399 stack = stack_next(stack)) {
2400 vm_size_t usage = stack_usage(stack);
2401
2402 if (usage > *maxusagep)
2403 *maxusagep = usage;
2404 }
2405 }
2406
2407 *totalp = stack_free_count;
2408 stack_unlock();
2409 (void) splx(s);
2410}
2411#endif /* MACHINE_STACK */
2412
2413kern_return_t host_stack_usage(
2414 host_t host,
2415 vm_size_t *reservedp,
2416 unsigned int *totalp,
2417 vm_size_t *spacep,
2418 vm_size_t *residentp,
2419 vm_size_t *maxusagep,
2420 vm_offset_t *maxstackp)
2421{
2422 natural_t total;
2423 vm_size_t maxusage;
2424
2425 if (host == HOST_NULL)
2426 return KERN_INVALID_HOST;
2427
2428 simple_lock(&stack_usage_lock);
2429 maxusage = stack_max_usage;
2430 simple_unlock(&stack_usage_lock);
2431
2432 stack_statistics(&total, &maxusage);
2433
2434 *reservedp = 0;
2435 *totalp = total;
2436 *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
2437 *maxusagep = maxusage;
2438 *maxstackp = 0;
2439 return KERN_SUCCESS;
2440}
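host_stack_usage reports space and resident sizes as total * round_page(KERNEL_STACK_SIZE); round_page is the usual round-up-to-a-page-boundary bit trick. A sketch of that arithmetic, with a 4 KiB page assumed only for illustration:

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u		/* illustrative; the kernel uses its PAGE_SIZE */

/* Round up to the next page boundary: add (page - 1), then clear the
 * low bits.  Works because the page size is a power of two. */
static unsigned long example_round_page(unsigned long x)
{
	return (x + (EXAMPLE_PAGE_SIZE - 1)) & ~(unsigned long)(EXAMPLE_PAGE_SIZE - 1);
}

int main(void)
{
	printf("%lu\n", example_round_page(4096));	/* 4096 */
	printf("%lu\n", example_round_page(4097));	/* 8192 */
	return 0;
}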
2441
2442kern_return_t processor_set_stack_usage(
2443 processor_set_t pset,
2444 unsigned int *totalp,
2445 vm_size_t *spacep,
2446 vm_size_t *residentp,
2447 vm_size_t *maxusagep,
2448 vm_offset_t *maxstackp)
2449{
2450 unsigned int total;
2451 vm_size_t maxusage;
2452 vm_offset_t maxstack;
2453
2454 thread_t *threads;
2455 thread_t tmp_thread;
2456
2457 unsigned int actual; /* number of threads in the pset */
2458 unsigned int i;
2459
2460 vm_size_t size, size_needed;
2461 vm_offset_t addr;
2462
2463 if (pset == PROCESSOR_SET_NULL)
2464 return KERN_INVALID_ARGUMENT;
2465
2466 size = 0; addr = 0;
2467
2468 for (;;) {
2469 pset_lock(pset);
2470 if (!pset->active) {
2471 pset_unlock(pset);
2472 return KERN_INVALID_ARGUMENT;
2473 }
2474
2475 actual = pset->thread_count;
2476
2477 /* do we have the memory we need? */
2478
2479 size_needed = actual * sizeof(thread_t);
2480 if (size_needed <= size)
2481 break;
2482
2483 /* unlock the pset and allocate more memory */
2484 pset_unlock(pset);
2485
2486 if (size != 0)
2487 kfree(addr, size);
2488
2489 assert(size_needed > 0);
2490 size = size_needed;
2491
2492 addr = kalloc(size);
2493 if (addr == 0)
2494 return KERN_RESOURCE_SHORTAGE;
2495 }
2496
2497 /* OK, have memory and the processor_set is locked & active */
2498
2499 threads = (thread_t *) addr;
2500 for (i = 0, tmp_thread = (thread_t) queue_first(&pset->threads);
2501 i < actual;
2502 i++,
2503 tmp_thread = (thread_t) queue_next(&tmp_thread->pset_threads)) {
2504 thread_reference(tmp_thread);
2505 threads[i] = tmp_thread;
2506 }
2507 assert(queue_end(&pset->threads, (queue_entry_t) tmp_thread));
2508
2509 /* can unlock processor set now that we have the thread refs */
2510 pset_unlock(pset);
2511
2512 /* calculate maxusage and free thread references */
2513
2514 total = 0;
2515 maxusage = 0;
2516 maxstack = 0;
2517 for (i = 0; i < actual; i++) {
2518 thread_t thread = threads[i];
2519 vm_offset_t stack = 0;
2520
2521 /*
2522 * thread->kernel_stack is only accurate if the
2523 * thread isn't swapped and is not executing.
2524 *
2525 * Of course, we don't have the appropriate locks
2526 * for these shenanigans.
2527 */
2528
2529 if ((thread->state & TH_SWAPPED) == 0) {
2530 int cpu;
2531
2532 stack = thread->kernel_stack;
2533
2534 for (cpu = 0; cpu < NCPUS; cpu++)
2535 if (active_threads[cpu] == thread) {
2536 stack = active_stacks[cpu];
2537 break;
2538 }
2539 }
2540
2541 if (stack != 0) {
2542 total++;
2543
2544 if (stack_check_usage) {
2545 vm_size_t usage = stack_usage(stack);
2546
2547 if (usage > maxusage) {
2548 maxusage = usage;
2549 maxstack = (vm_offset_t) thread;
2550 }
2551 }
2552 }
2553
2554 thread_deallocate(thread);
2555 }
2556
2557 if (size != 0)
2558 kfree(addr, size);
2559
2560 *totalp = total;
2561 *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
2562 *maxusagep = maxusage;
2563 *maxstackp = maxstack;
2564 return KERN_SUCCESS;
2565}
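processor_set_stack_usage sizes its snapshot buffer with a guess-lock-check-retry loop: allocation cannot happen while the pset is locked, so when the thread count has outgrown the current buffer the lock is dropped, the buffer is reallocated, and the check is repeated. A condensed user-space sketch of the pattern follows; all names are illustrative, and the real loop above also re-checks pset->active after relocking:

#include <stdlib.h>

/* Hypothetical stand-ins for a locked, counted collection. */
struct counted_set {
	unsigned count;		/* protected by the lock below */
};

static void set_lock(struct counted_set *s)   { (void)s; /* take the lock */ }
static void set_unlock(struct counted_set *s) { (void)s; /* drop the lock */ }

/* Allocate a pointer array big enough for every item, retrying when
 * the count grows while the lock is dropped.  Returns NULL on
 * allocation failure (or when the set is empty); *actualp receives
 * the snapshot count. */
static void **snapshot_items(struct counted_set *s, unsigned *actualp)
{
	void **buf = NULL;
	size_t size = 0, needed;

	for (;;) {
		set_lock(s);
		needed = s->count * sizeof(void *);
		if (needed <= size)
			break;		/* big enough: keep the lock and go on */

		set_unlock(s);		/* cannot allocate while locked */
		free(buf);
		size = needed;
		buf = malloc(size);
		if (buf == NULL)
			return NULL;	/* KERN_RESOURCE_SHORTAGE analogue */
	}

	*actualp = s->count;
	/* ... copy or take references on the items here, as the real code does ... */
	set_unlock(s);
	return buf;
}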
2566
2567/*
2568 * Useful in the debugger:
2569 */
2570void
2571thread_stats(void)
2572{
2573 thread_t thread;
2574 int total = 0, rpcreply = 0;
2575
2576 queue_iterate(&default_pset.threads, thread, thread_t, pset_threads) {
2577 total++;
2578 if (thread->ith_rpc_reply != IP_NULL)
2579 rpcreply++;
2580 }
2581
2582 printf("%d total threads.\n", total);
2583 printf("%d using rpc_reply.\n", rpcreply);
2584}
2585#endif /* MACH_DEBUG */