Bug Summary

File: obj-scan-build/../kern/task.c
Location: line 579, column 3
Description: Array access (from variable 'threads') results in a null pointer dereference
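
The trace below is only feasible if the allocation loop exits with 'addr' still 0 while
'actual' is non-zero; the code relies on that combination never occurring, and the analyzer
cannot see the invariant. A minimal defensive sketch that would make the invariant explicit
before the copy loop is shown here; it is an illustration under that assumption, not the
actual kern/task.c code or its fix:

    /* Hypothetical hardening sketch, not code from kern/task.c. */
    threads = (thread_t *) addr;
    if (actual != 0 && addr == 0) {
            /* No buffer despite a non-empty thread list: bail out
               rather than index a null pointer at line 579. */
            task_unlock(task);
            return KERN_RESOURCE_SHORTAGE;
    }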

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993-1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: kern/task.c
28 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
29 * David Black
30 *
31 * Task management primitives implementation.
32 */
33
34#include <string.h>
35
36#include <mach/machine/vm_types.h>
37#include <mach/vm_param.h>
38#include <mach/task_info.h>
39#include <mach/task_special_ports.h>
40#include <ipc/ipc_space.h>
41#include <ipc/ipc_types.h>
42#include <kern/debug.h>
43#include <kern/task.h>
44#include <kern/thread.h>
45#include <kern/slab.h>
46#include <kern/kalloc.h>
47#include <kern/processor.h>
48#include <kern/sched_prim.h> /* for thread_wakeup */
49#include <kern/ipc_tt.h>
50#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
51#include <machine/machspl.h> /* for splsched */
52
53task_t kernel_task = TASK_NULL((task_t) 0);
54struct kmem_cache task_cache;
55
56extern void eml_init(void);
57extern void eml_task_reference(task_t, task_t);
58extern void eml_task_deallocate(task_t);
59
60void task_init(void)
61{
62 kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
63 NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
64
65 eml_init();
66 machine_task_module_init ();
67
68 /*
69 * Create the kernel task as the first task.
70 * Task_create must assign to kernel_task as a side effect,
71 * for other initialization. (:-()
72 */
73 (void) task_create(TASK_NULL((task_t) 0), FALSE((boolean_t) 0), &kernel_task);
74}
75
76kern_return_t task_create(
77 task_t parent_task,
78 boolean_t inherit_memory,
79 task_t *child_task) /* OUT */
80{
81 register task_t new_task;
82 register processor_set_t pset;
83#if FAST_TAS0
84 int i;
85#endif
86
87 new_task = (task_t) kmem_cache_alloc(&task_cache);
88 if (new_task == TASK_NULL((task_t) 0)) {
89 panic("task_create: no memory for task structure");
90 }
91
92 /* one ref for just being alive; one for our caller */
93 new_task->ref_count = 2;
94
95 if (child_task == &kernel_task) {
96 new_task->map = kernel_map;
97 } else if (inherit_memory) {
98 new_task->map = vm_map_fork(parent_task->map);
99 } else {
100 new_task->map = vm_map_create(pmap_create(0),
101 round_page(VM_MIN_ADDRESS),
102 trunc_page(VM_MAX_ADDRESS), TRUE((boolean_t) 1));
103 }
104
105 simple_lock_init(&new_task->lock);
106 queue_init(&new_task->thread_list);
107 new_task->suspend_count = 0;
108 new_task->active = TRUE((boolean_t) 1);
109 new_task->user_stop_count = 0;
110 new_task->thread_count = 0;
111 new_task->faults = 0;
112 new_task->zero_fills = 0;
113 new_task->reactivations = 0;
114 new_task->pageins = 0;
115 new_task->cow_faults = 0;
116 new_task->messages_sent = 0;
117 new_task->messages_received = 0;
118
119 eml_task_reference(new_task, parent_task);
120
121 ipc_task_init(new_task, parent_task);
122 machine_task_init (new_task);
123
124 new_task->total_user_time.seconds = 0;
125 new_task->total_user_time.microseconds = 0;
126 new_task->total_system_time.seconds = 0;
127 new_task->total_system_time.microseconds = 0;
128
129 record_time_stamp (&new_task->creation_time);
130
131 if (parent_task != TASK_NULL((task_t) 0)) {
132 task_lock(parent_task);
133 pset = parent_task->processor_set;
134 if (!pset->active)
135 pset = &default_pset;
136 pset_reference(pset);
137 new_task->priority = parent_task->priority;
138 task_unlock(parent_task);
139 }
140 else {
141 pset = &default_pset;
142 pset_reference(pset);
143 new_task->priority = BASEPRI_USER25;
144 }
145 pset_lock(pset);
146 pset_add_task(pset, new_task);
147 pset_unlock(pset);
148
149 new_task->may_assign = TRUE((boolean_t) 1);
150 new_task->assign_active = FALSE((boolean_t) 0);
151
152#if MACH_PCSAMPLE1
153 new_task->pc_sample.buffer = 0;
154 new_task->pc_sample.seqno = 0;
155 new_task->pc_sample.sampletypes = 0;
156#endif /* MACH_PCSAMPLE */
157
158#if FAST_TAS0
159 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
160 if (inherit_memory) {
161 new_task->fast_tas_base[i] = parent_task->fast_tas_base[i];
162 new_task->fast_tas_end[i] = parent_task->fast_tas_end[i];
163 } else {
164 new_task->fast_tas_base[i] = (vm_offset_t)0;
165 new_task->fast_tas_end[i] = (vm_offset_t)0;
166 }
167 }
168#endif /* FAST_TAS */
169
170 ipc_task_enable(new_task);
171
172 *child_task = new_task;
173 return KERN_SUCCESS0;
174}
175
176/*
177 * task_deallocate:
178 *
179 * Give up a reference to the specified task and destroy it if there
180 * are no other references left. It is assumed that the current thread
181 * is never in this task.
182 */
183void task_deallocate(
184 register task_t task)
185{
186 register int c;
187 register processor_set_t pset;
188
189 if (task == TASK_NULL((task_t) 0))
190 return;
191
192 task_lock(task);
193 c = --(task->ref_count);
194 task_unlock(task);
195 if (c != 0)
196 return;
197
198 machine_task_terminate (task);
199
200 eml_task_deallocate(task);
201
202 pset = task->processor_set;
203 pset_lock(pset);
204 pset_remove_task(pset,task);
205 pset_unlock(pset);
206 pset_deallocate(pset);
207 vm_map_deallocate(task->map);
208 is_release(task->itk_space)ipc_space_release(task->itk_space);
209 kmem_cache_free(&task_cache, (vm_offset_t) task);
210}
211
212void task_reference(
213 register task_t task)
214{
215 if (task == TASK_NULL((task_t) 0))
216 return;
217
218 task_lock(task);
219 task->ref_count++;
220 task_unlock(task);
221}
222
223/*
224 * task_terminate:
225 *
226 * Terminate the specified task. See comments on thread_terminate
227 * (kern/thread.c) about problems with terminating the "current task."
228 */
229kern_return_t task_terminate(
230 register task_t task)
231{
232 register thread_t thread, cur_thread;
233 register queue_head_t *list;
234 register task_t cur_task;
235 spl_t s;
236
237 if (task == TASK_NULL((task_t) 0))
238 return KERN_INVALID_ARGUMENT4;
239
240 list = &task->thread_list;
241 cur_task = current_task()((active_threads[(0)])->task);
242 cur_thread = current_thread()(active_threads[(0)]);
243
244 /*
245 * Deactivate task so that it can't be terminated again,
246 * and so lengthy operations in progress will abort.
247 *
248 * If the current thread is in this task, remove it from
249 * the task's thread list to keep the thread-termination
250 * loop simple.
251 */
252 if (task == cur_task) {
253 task_lock(task);
254 if (!task->active) {
255 /*
256 * Task is already being terminated.
257 */
258 task_unlock(task);
259 return KERN_FAILURE5;
260 }
261 /*
262 * Make sure current thread is not being terminated.
263 */
264 s = splsched();
265 thread_lock(cur_thread);
266 if (!cur_thread->active) {
267 thread_unlock(cur_thread);
268 (void) splx(s);
269 task_unlock(task);
270 thread_terminate(cur_thread);
271 return KERN_FAILURE5;
272 }
273 task->active = FALSE((boolean_t) 0);
274 queue_remove(list, cur_thread, thread_t, thread_list);
275 thread_unlock(cur_thread);
276 (void) splx(s);
277 task_unlock(task);
278
279 /*
280 * Shut down this thread's ipc now because it must
281 * be left alone to terminate the task.
282 */
283 ipc_thread_disable(cur_thread);
284 ipc_thread_terminate(cur_thread);
285 }
286 else {
287 /*
288 * Lock both current and victim task to check for
289 * potential deadlock.
290 */
291 if ((vm_offset_t)task < (vm_offset_t)cur_task) {
292 task_lock(task);
293 task_lock(cur_task);
294 }
295 else {
296 task_lock(cur_task);
297 task_lock(task);
298 }
299 /*
300 * Check if current thread or task is being terminated.
301 */
302 s = splsched();
303 thread_lock(cur_thread);
304 if ((!cur_task->active) ||(!cur_thread->active)) {
305 /*
306 * Current task or thread is being terminated.
307 */
308 thread_unlock(cur_thread);
309 (void) splx(s);
310 task_unlock(task);
311 task_unlock(cur_task);
312 thread_terminate(cur_thread);
313 return KERN_FAILURE5;
314 }
315 thread_unlock(cur_thread);
316 (void) splx(s);
317 task_unlock(cur_task);
318
319 if (!task->active) {
320 /*
321 * Task is already being terminated.
322 */
323 task_unlock(task);
324 return KERN_FAILURE5;
325 }
326 task->active = FALSE((boolean_t) 0);
327 task_unlock(task);
328 }
329
330 /*
331 * Prevent further execution of the task. ipc_task_disable
332 * prevents further task operations via the task port.
333 * If this is the current task, the current thread will
334 * be left running.
335 */
336 ipc_task_disable(task);
337 (void) task_hold(task);
338 (void) task_dowait(task,TRUE((boolean_t) 1)); /* may block */
339
340 /*
341 * Terminate each thread in the task.
342 *
343 * The task_port is closed down, so no more thread_create
344 * operations can be done. Thread_force_terminate closes the
345 * thread port for each thread; when that is done, the
346 * thread will eventually disappear. Thus the loop will
347 * terminate. Call thread_force_terminate instead of
348 * thread_terminate to avoid deadlock checks. Need
349 * to call thread_block() inside loop because some other
350 * thread (e.g., the reaper) may have to run to get rid
351 * of all references to the thread; it won't vanish from
352 * the task's thread list until the last one is gone.
353 */
354 task_lock(task);
355 while (!queue_empty(list)(((list)) == (((list)->next)))) {
356 thread = (thread_t) queue_first(list)((list)->next);
357 thread_reference(thread);
358 task_unlock(task);
359 thread_force_terminate(thread);
360 thread_deallocate(thread);
361 thread_block((void (*)()) 0);
362 task_lock(task);
363 }
364 task_unlock(task);
365
366 /*
367 * Shut down IPC.
368 */
369 ipc_task_terminate(task);
370
371
372 /*
373 * Deallocate the task's reference to itself.
374 */
375 task_deallocate(task);
376
377 /*
378 * If the current thread is in this task, it has not yet
379 * been terminated (since it was removed from the task's
380 * thread-list). Put it back in the thread list (for
381 * completeness), and terminate it. Since it holds the
382 * last reference to the task, terminating it will deallocate
383 * the task.
384 */
385 if (cur_thread->task == task) {
386 task_lock(task);
387 s = splsched();
388 queue_enter(list, cur_thread, thread_t, thread_list);
389 (void) splx(s);
390 task_unlock(task);
391 (void) thread_terminate(cur_thread);
392 }
393
394 return KERN_SUCCESS0;
395}
396
397/*
398 * task_hold:
399 *
400 * Suspend execution of the specified task.
401 * This is a recursive-style suspension of the task, a count of
402 * suspends is maintained.
403 */
404kern_return_t task_hold(
405 register task_t task)
406{
407 register queue_head_t *list;
408 register thread_t thread, cur_thread;
409
410 cur_thread = current_thread()(active_threads[(0)]);
411
412 task_lock(task);
413 if (!task->active) {
414 task_unlock(task);
415 return KERN_FAILURE5;
416 }
417
418 task->suspend_count++;
419
420 /*
421 * Iterate through all the threads and hold them.
422 * Do not hold the current thread if it is within the
423 * task.
424 */
425 list = &task->thread_list;
426 queue_iterate(list, thread, thread_t, thread_list) {
427 if (thread != cur_thread)
428 thread_hold(thread);
429 }
430 task_unlock(task);
431 return KERN_SUCCESS0;
432}
433
434/*
435 * task_dowait:
436 *
437 * Wait until the task has really been suspended (all of the threads
438 * are stopped). Skip the current thread if it is within the task.
439 *
440 * If task is deactivated while waiting, return a failure code unless
441 * must_wait is true.
442 */
443kern_return_t task_dowait(
444 register task_t task,
445 boolean_t must_wait)
446{
447 register queue_head_t *list;
448 register thread_t thread, cur_thread, prev_thread;
449 register kern_return_t ret = KERN_SUCCESS0;
450
451 /*
452 * Iterate through all the threads.
453 * While waiting for each thread, we gain a reference to it
454 * to prevent it from going away on us. This guarantees
455 * that the "next" thread in the list will be a valid thread.
456 *
457 * We depend on the fact that if threads are created while
458 * we are looping through the threads, they will be held
459 * automatically. We don't care about threads that get
460 * deallocated along the way (the reference prevents it
461 * from happening to the thread we are working with).
462 *
463 * If the current thread is in the affected task, it is skipped.
464 *
465 * If the task is deactivated before we're done, and we don't
466 * have to wait for it (must_wait is FALSE), just bail out.
467 */
468 cur_thread = current_thread()(active_threads[(0)]);
469
470 list = &task->thread_list;
471 prev_thread = THREAD_NULL((thread_t) 0);
472 task_lock(task);
473 queue_iterate(list, thread, thread_t, thread_list) {
474 if (!(task->active) && !(must_wait)) {
475 ret = KERN_FAILURE5;
476 break;
477 }
478 if (thread != cur_thread) {
479 thread_reference(thread);
480 task_unlock(task);
481 if (prev_thread != THREAD_NULL((thread_t) 0))
482 thread_deallocate(prev_thread);
483 /* may block */
484 (void) thread_dowait(thread, TRUE((boolean_t) 1)); /* may block */
485 prev_thread = thread;
486 task_lock(task);
487 }
488 }
489 task_unlock(task);
490 if (prev_thread != THREAD_NULL((thread_t) 0))
491 thread_deallocate(prev_thread); /* may block */
492 return ret;
493}
494
495kern_return_t task_release(
496 register task_t task)
497{
498 register queue_head_t *list;
499 register thread_t thread, next;
500
501 task_lock(task);
502 if (!task->active) {
503 task_unlock(task);
504 return KERN_FAILURE5;
505 }
506
507 task->suspend_count--;
508
509 /*
510 * Iterate through all the threads and release them
511 */
512 list = &task->thread_list;
513 thread = (thread_t) queue_first(list)((list)->next);
514 while (!queue_end(list, (queue_entry_t) thread)((list) == ((queue_entry_t) thread))) {
515 next = (thread_t) queue_next(&thread->thread_list)((&thread->thread_list)->next);
516 thread_release(thread);
517 thread = next;
518 }
519 task_unlock(task);
520 return KERN_SUCCESS0;
521}
522
523kern_return_t task_threads(
524 task_t task,
525 thread_array_t *thread_list,
526 natural_t *count)
527{
528 unsigned int actual; /* this many threads */
529 thread_t thread;
530 thread_t *threads;
531 int i;
532
533 vm_size_t size, size_needed;
534 vm_offset_t addr;
535
536 if (task == TASK_NULL((task_t) 0))
Step 1: Assuming 'task' is not equal to null
Step 2: Taking false branch
537 return KERN_INVALID_ARGUMENT4;
538
539 size = 0; addr = 0;
Step 3: The value 0 is assigned to 'addr'
540
541 for (;;) {
Step 4: Loop condition is true. Entering loop body
542 task_lock(task);
543 if (!task->active) {
Step 5: Taking false branch
544 task_unlock(task);
545 return KERN_FAILURE5;
546 }
547
548 actual = task->thread_count;
549
550 /* do we have the memory we need? */
551
552 size_needed = actual * sizeof(mach_port_t);
553 if (size_needed <= size)
Step 6: Assuming 'size_needed' is <= 'size'
Step 7: Taking true branch
554 break;
Step 8: Execution continues on line 572
555
556 /* unlock the task and allocate more memory */
557 task_unlock(task);
558
559 if (size != 0)
560 kfree(addr, size);
561
562 assert(size_needed > 0);
563 size = size_needed;
564
565 addr = kalloc(size);
566 if (addr == 0)
567 return KERN_RESOURCE_SHORTAGE6;
568 }
569
570 /* OK, have memory and the task is locked & active */
571
572 threads = (thread_t *) addr;
Step 9: Null pointer value stored to 'threads'
573
574 for (i = 0, thread = (thread_t) queue_first(&task->thread_list)((&task->thread_list)->next);
Step 11: Loop condition is true. Entering loop body
575 i < actual;
Step 10: Assuming 'i' is < 'actual'
576 i++, thread = (thread_t) queue_next(&thread->thread_list)((&thread->thread_list)->next)) {
577 /* take ref for convert_thread_to_port */
578 thread_reference(thread);
579 threads[i] = thread;
Step 12: Array access (from variable 'threads') results in a null pointer dereference
580 }
581 assert(queue_end(&task->thread_list, (queue_entry_t) thread));
582
583 /* can unlock task now that we've got the thread refs */
584 task_unlock(task);
585
586 if (actual == 0) {
587 /* no threads, so return null pointer and deallocate memory */
588
589 *thread_list = 0;
590 *count = 0;
591
592 if (size != 0)
593 kfree(addr, size);
594 } else {
595 /* if we allocated too much, must copy */
596
597 if (size_needed < size) {
598 vm_offset_t newaddr;
599
600 newaddr = kalloc(size_needed);
601 if (newaddr == 0) {
602 for (i = 0; i < actual; i++)
603 thread_deallocate(threads[i]);
604 kfree(addr, size);
605 return KERN_RESOURCE_SHORTAGE6;
606 }
607
608 memcpy((void *) newaddr, (void *) addr, size_needed);
609 kfree(addr, size);
610 threads = (thread_t *) newaddr;
611 }
612
613 *thread_list = (mach_port_t *) threads;
614 *count = actual;
615
616 /* do the conversion that Mig should handle */
617
618 for (i = 0; i < actual; i++)
619 ((ipc_port_t *) threads)[i] =
620 convert_thread_to_port(threads[i]);
621 }
622
623 return KERN_SUCCESS0;
624}
625
626kern_return_t task_suspend(
627 register task_t task)
628{
629 register boolean_t hold;
630
631 if (task == TASK_NULL((task_t) 0))
632 return KERN_INVALID_ARGUMENT4;
633
634 hold = FALSE((boolean_t) 0);
635 task_lock(task);
636 if ((task->user_stop_count)++ == 0)
637 hold = TRUE((boolean_t) 1);
638 task_unlock(task);
639
640 /*
641 * If the stop count was positive, the task is
642 * already stopped and we can exit.
643 */
644 if (!hold) {
645 return KERN_SUCCESS0;
646 }
647
648 /*
649 * Hold all of the threads in the task, and wait for
650 * them to stop. If the current thread is within
651 * this task, hold it separately so that all of the
652 * other threads can stop first.
653 */
654
655 if (task_hold(task) != KERN_SUCCESS0)
656 return KERN_FAILURE5;
657
658 if (task_dowait(task, FALSE((boolean_t) 0)) != KERN_SUCCESS0)
659 return KERN_FAILURE5;
660
661 if (current_task()((active_threads[(0)])->task) == task) {
662 spl_t s;
663
664 thread_hold(current_thread()(active_threads[(0)]));
665 /*
666 * We want to call thread_block on our way out,
667 * to stop running.
668 */
669 s = splsched();
670 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
671 (void) splx(s);
672 }
673
674 return KERN_SUCCESS0;
675}
676
677kern_return_t task_resume(
678 register task_t task)
679{
680 register boolean_t release;
681
682 if (task == TASK_NULL((task_t) 0))
683 return KERN_INVALID_ARGUMENT4;
684
685 release = FALSE((boolean_t) 0);
686 task_lock(task);
687 if (task->user_stop_count > 0) {
688 if (--(task->user_stop_count) == 0)
689 release = TRUE((boolean_t) 1);
690 }
691 else {
692 task_unlock(task);
693 return KERN_FAILURE5;
694 }
695 task_unlock(task);
696
697 /*
698 * Release the task if necessary.
699 */
700 if (release)
701 return task_release(task);
702
703 return KERN_SUCCESS0;
704}
705
706kern_return_t task_info(
707 task_t task,
708 int flavor,
709 task_info_t task_info_out, /* pointer to OUT array */
710 natural_t *task_info_count) /* IN/OUT */
711{
712 vm_map_t map;
713
714 if (task == TASK_NULL((task_t) 0))
715 return KERN_INVALID_ARGUMENT4;
716
717 switch (flavor) {
718 case TASK_BASIC_INFO1:
719 {
720 register task_basic_info_t basic_info;
721
722 /* Allow *task_info_count to be two words smaller than
723 the usual amount, because creation_time is a new member
724 that some callers might not know about. */
725
726 if (*task_info_count < TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t)) - 2) {
727 return KERN_INVALID_ARGUMENT4;
728 }
729
730 basic_info = (task_basic_info_t) task_info_out;
731
732 map = (task == kernel_task) ? kernel_map : task->map;
733
734 basic_info->virtual_size = map->size;
735 basic_info->resident_size = pmap_resident_count(map->pmap)((map->pmap)->stats.resident_count)
736 * PAGE_SIZE(1 << 12);
737
738 task_lock(task);
739 basic_info->base_priority = task->priority;
740 basic_info->suspend_count = task->user_stop_count;
741 basic_info->user_time.seconds
742 = task->total_user_time.seconds;
743 basic_info->user_time.microseconds
744 = task->total_user_time.microseconds;
745 basic_info->system_time.seconds
746 = task->total_system_time.seconds;
747 basic_info->system_time.microseconds
748 = task->total_system_time.microseconds;
749 basic_info->creation_time = task->creation_time;
750 task_unlock(task);
751
752 if (*task_info_count > TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t)))
753 *task_info_count = TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t));
754 break;
755 }
756
757 case TASK_EVENTS_INFO2:
758 {
759 register task_events_info_t event_info;
760
761 if (*task_info_count < TASK_EVENTS_INFO_COUNT(sizeof(task_events_info_data_t) / sizeof(natural_t))) {
762 return KERN_INVALID_ARGUMENT4;
763 }
764
765 event_info = (task_events_info_t) task_info_out;
766
767 task_lock(task);
768 event_info->faults = task->faults;
769 event_info->zero_fills = task->zero_fills;
770 event_info->reactivations = task->reactivations;
771 event_info->pageins = task->pageins;
772 event_info->cow_faults = task->cow_faults;
773 event_info->messages_sent = task->messages_sent;
774 event_info->messages_received = task->messages_received;
775 task_unlock(task);
776
777 *task_info_count = TASK_EVENTS_INFO_COUNT(sizeof(task_events_info_data_t) / sizeof(natural_t));
778 break;
779 }
780
781 case TASK_THREAD_TIMES_INFO3:
782 {
783 register task_thread_times_info_t times_info;
784 register thread_t thread;
785
786 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT(sizeof(task_thread_times_info_data_t) / sizeof(natural_t))) {
787 return KERN_INVALID_ARGUMENT4;
788 }
789
790 times_info = (task_thread_times_info_t) task_info_out;
791 times_info->user_time.seconds = 0;
792 times_info->user_time.microseconds = 0;
793 times_info->system_time.seconds = 0;
794 times_info->system_time.microseconds = 0;
795
796 task_lock(task);
797 queue_iterate(&task->thread_list, thread,
798 thread_t, thread_list)
800 time_value_t user_time, system_time;
801 spl_t s;
802
803 s = splsched();
804 thread_lock(thread);
805
806 thread_read_times(thread, &user_time, &system_time);
807
808 thread_unlock(thread);
809 splx(s);
810
811 time_value_add(&times_info->user_time, &user_time);
812 time_value_add(&times_info->system_time, &system_time);
813 }
814 task_unlock(task);
815
816 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT(sizeof(task_thread_times_info_data_t) / sizeof(natural_t));
817 break;
818 }
819
820 default:
821 return KERN_INVALID_ARGUMENT4;
822 }
823
824 return KERN_SUCCESS0;
825}
826
827#if MACH_HOST0
828/*
829 * task_assign:
830 *
831 * Change the assigned processor set for the task
832 */
833kern_return_t
834task_assign(
835 task_t task,
836 processor_set_t new_pset,
837 boolean_t assign_threads)
838{
839 kern_return_t ret = KERN_SUCCESS0;
840 register thread_t thread, prev_thread;
841 register queue_head_t *list;
842 register processor_set_t pset;
843
844 if (task == TASK_NULL((task_t) 0) || new_pset == PROCESSOR_SET_NULL((processor_set_t) 0)) {
845 return KERN_INVALID_ARGUMENT4;
846 }
847
848 /*
849 * Freeze task`s assignment. Prelude to assigning
850 * task. Only one freeze may be held per task.
851 */
852
853 task_lock(task);
854 while (task->may_assign == FALSE((boolean_t) 0)) {
855 task->assign_active = TRUE((boolean_t) 1);
856 assert_wait((event_t)&task->assign_active, TRUE((boolean_t) 1));
857 task_unlock(task);
858 thread_block((void (*)()) 0);
859 task_lock(task);
860 }
861
862 /*
863 * Avoid work if task already in this processor set.
864 */
865 if (task->processor_set == new_pset) {
866 /*
867 * No need for task->assign_active wakeup:
868 * task->may_assign is still TRUE.
869 */
870 task_unlock(task);
871 return KERN_SUCCESS0;
872 }
873
874 task->may_assign = FALSE((boolean_t) 0);
875 task_unlock(task);
876
877 /*
878 * Safe to get the task`s pset: it cannot change while
879 * task is frozen.
880 */
881 pset = task->processor_set;
882
883 /*
884 * Lock both psets now. Use ordering to avoid deadlock.
885 */
886 Restart:
887 if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
888 pset_lock(pset);
889 pset_lock(new_pset);
890 }
891 else {
892 pset_lock(new_pset);
893 pset_lock(pset);
894 }
895
896 /*
897 * Check if new_pset is ok to assign to. If not,
898 * reassign to default_pset.
899 */
900 if (!new_pset->active) {
901 pset_unlock(pset);
902 pset_unlock(new_pset);
903 new_pset = &default_pset;
904 goto Restart;
905 }
906
907 pset_reference(new_pset);
908
909 /*
910 * Now grab the task lock and move the task.
911 */
912
913 task_lock(task);
914 pset_remove_task(pset, task);
915 pset_add_task(new_pset, task);
916
917 pset_unlock(pset);
918 pset_unlock(new_pset);
919
920 if (assign_threads == FALSE((boolean_t) 0)) {
921 /*
922 * We leave existing threads at their
923 * old assignments. Unfreeze task`s
924 * assignment.
925 */
926 task->may_assign = TRUE((boolean_t) 1);
927 if (task->assign_active) {
928 task->assign_active = FALSE((boolean_t) 0);
929 thread_wakeup((event_t) &task->assign_active);
930 }
931 task_unlock(task);
932 pset_deallocate(pset);
933 return KERN_SUCCESS0;
934 }
935
936 /*
937 * If current thread is in task, freeze its assignment.
938 */
939 if (current_thread()(active_threads[(0)])->task == task) {
940 task_unlock(task);
941 thread_freeze(current_thread()(active_threads[(0)]));
942 task_lock(task);
943 }
944
945 /*
946 * Iterate down the thread list reassigning all the threads.
947 * New threads pick up task's new processor set automatically.
948 * Do current thread last because new pset may be empty.
949 */
950 list = &task->thread_list;
951 prev_thread = THREAD_NULL((thread_t) 0);
952 queue_iterate(list, thread, thread_t, thread_list) {
953 if (!(task->active)) {
954 ret = KERN_FAILURE5;
955 break;
956 }
957 if (thread != current_thread()(active_threads[(0)])) {
958 thread_reference(thread);
959 task_unlock(task);
960 if (prev_thread != THREAD_NULL((thread_t) 0))
961 thread_deallocate(prev_thread); /* may block */
962 thread_assign(thread,new_pset); /* may block */
963 prev_thread = thread;
964 task_lock(task);
965 }
966 }
967
968 /*
969 * Done, wakeup anyone waiting for us.
970 */
971 task->may_assign = TRUE((boolean_t) 1);
972 if (task->assign_active) {
973 task->assign_active = FALSE((boolean_t) 0);
974 thread_wakeup((event_t)&task->assign_active);
975 }
976 task_unlock(task);
977 if (prev_thread != THREAD_NULL((thread_t) 0))
978 thread_deallocate(prev_thread); /* may block */
979
980 /*
981 * Finish assignment of current thread.
982 */
983 if (current_thread()(active_threads[(0)])->task == task)
984 thread_doassign(current_thread()(active_threads[(0)]), new_pset, TRUE((boolean_t) 1));
985
986 pset_deallocate(pset);
987
988 return ret;
989}
990#else /* MACH_HOST */
991/*
992 * task_assign:
993 *
994 * Change the assigned processor set for the task
995 */
996kern_return_t
997task_assign(
998 task_t task,
999 processor_set_t new_pset,
1000 boolean_t assign_threads)
1001{
1002 return KERN_FAILURE5;
1003}
1004#endif /* MACH_HOST */
1005
1006
1007/*
1008 * task_assign_default:
1009 *
1010 * Version of task_assign to assign to default processor set.
1011 */
1012kern_return_t
1013task_assign_default(
1014 task_t task,
1015 boolean_t assign_threads)
1016{
1017 return task_assign(task, &default_pset, assign_threads);
1018}
1019
1020/*
1021 * task_get_assignment
1022 *
1023 * Return name of processor set that task is assigned to.
1024 */
1025kern_return_t task_get_assignment(
1026 task_t task,
1027 processor_set_t *pset)
1028{
1029 if (!task->active)
1030 return KERN_FAILURE5;
1031
1032 *pset = task->processor_set;
1033 pset_reference(*pset);
1034 return KERN_SUCCESS0;
1035}
1036
1037/*
1038 * task_priority
1039 *
1040 * Set priority of task; used only for newly created threads.
1041 * Optionally change priorities of threads.
1042 */
1043kern_return_t
1044task_priority(
1045 task_t task,
1046 int priority,
1047 boolean_t change_threads)
1048{
1049 kern_return_t ret = KERN_SUCCESS0;
1050
1051 if (task == TASK_NULL((task_t) 0) || invalid_pri(priority)(((priority) < 0) || ((priority) >= 50)))
1052 return KERN_INVALID_ARGUMENT4;
1053
1054 task_lock(task);
1055 task->priority = priority;
1056
1057 if (change_threads) {
1058 register thread_t thread;
1059 register queue_head_t *list;
1060
1061 list = &task->thread_list;
1062 queue_iterate(list, thread, thread_t, thread_list) {
1063 if (thread_priority(thread, priority, FALSE((boolean_t) 0))
1064 != KERN_SUCCESS0)
1065 ret = KERN_FAILURE5;
1066 }
1067 }
1068
1069 task_unlock(task);
1070 return ret;
1071}
1072
1073/*
1074 * task_collect_scan:
1075 *
1076 * Attempt to free resources owned by tasks.
1077 */
1078
1079void task_collect_scan(void)
1080{
1081 register task_t task, prev_task;
1082 processor_set_t pset, prev_pset;
1083
1084 prev_task = TASK_NULL((task_t) 0);
1085 prev_pset = PROCESSOR_SET_NULL((processor_set_t) 0);
1086
1087 simple_lock(&all_psets_lock);
1088 queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
1089 pset_lock(pset);
1090 queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
1091 task_reference(task);
1092 pset_reference(pset);
1093 pset_unlock(pset);
1094 simple_unlock(&all_psets_lock);
1095
1096 machine_task_collect (task);
1097 pmap_collect(task->map->pmap);
1098
1099 if (prev_task != TASK_NULL((task_t) 0))
1100 task_deallocate(prev_task);
1101 prev_task = task;
1102
1103 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
1104 pset_deallocate(prev_pset);
1105 prev_pset = pset;
1106
1107 simple_lock(&all_psets_lock);
1108 pset_lock(pset);
1109 }
1110 pset_unlock(pset);
1111 }
1112 simple_unlock(&all_psets_lock);
1113
1114 if (prev_task != TASK_NULL((task_t) 0))
1115 task_deallocate(prev_task);
1116 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
1117 pset_deallocate(prev_pset);
1118}
1119
1120boolean_t task_collect_allowed = TRUE((boolean_t) 1);
1121unsigned task_collect_last_tick = 0;
1122unsigned task_collect_max_rate = 0; /* in ticks */
1123
1124/*
1125 * consider_task_collect:
1126 *
1127 * Called by the pageout daemon when the system needs more free pages.
1128 */
1129
1130void consider_task_collect(void)
1131{
1132 /*
1133 * By default, don't attempt task collection more frequently
1134 * than once a second.
1135 */
1136
1137 if (task_collect_max_rate == 0)
1138 task_collect_max_rate = hz;
1139
1140 if (task_collect_allowed &&
1141 (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
1142 task_collect_last_tick = sched_tick;
1143 task_collect_scan();
1144 }
1145}
1146
1147kern_return_t
1148task_ras_control(
1149 task_t task,
1150 vm_offset_t pc,
1151 vm_offset_t endpc,
1152 int flavor)
1153{
1154 kern_return_t ret = KERN_FAILURE5;
1155
1156#if FAST_TAS0
1157 int i;
1158
1159 ret = KERN_SUCCESS0;
1160 task_lock(task);
1161 switch (flavor) {
1162 case TASK_RAS_CONTROL_PURGE_ALL0: /* remove all RAS */
1163 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1164 task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1165 }
1166 break;
1167 case TASK_RAS_CONTROL_PURGE_ONE1: /* remove this RAS, collapse remaining */
1168 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1169 if ( (task->fast_tas_base[i] == pc)
1170 && (task->fast_tas_end[i] == endpc)) {
1171 while (i < TASK_FAST_TAS_NRAS-1) {
1172 task->fast_tas_base[i] = task->fast_tas_base[i+1];
1173 task->fast_tas_end[i] = task->fast_tas_end[i+1];
1174 i++;
1175 }
1176 task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0;
1177 task->fast_tas_end[TASK_FAST_TAS_NRAS-1] = 0;
1178 break;
1179 }
1180 }
1181 if (i == TASK_FAST_TAS_NRAS) {
1182 ret = KERN_INVALID_ADDRESS1;
1183 }
1184 break;
1185 case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE2:
1186 /* remove all RAS and install this RAS */
1187 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1188 task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1189 }
1190 /* FALL THROUGH */
1191 case TASK_RAS_CONTROL_INSTALL_ONE3: /* install this RAS */
1192 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1193 if ( (task->fast_tas_base[i] == pc)
1194 && (task->fast_tas_end[i] == endpc)) {
1195 /* already installed */
1196 break;
1197 }
1198 if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)){
1199 task->fast_tas_base[i] = pc;
1200 task->fast_tas_end[i] = endpc;
1201 break;
1202 }
1203 }
1204 if (i == TASK_FAST_TAS_NRAS) {
1205 ret = KERN_RESOURCE_SHORTAGE6;
1206 }
1207 break;
1208 default: ret = KERN_INVALID_VALUE18;
1209 break;
1210 }
1211 task_unlock(task);
1212#endif
1213 return ret;
1214}