Bug Summary

File: obj-scan-build/../kern/task.c
Location: line 1094, column 17
Description: Access to field 'map' results in a dereference of a null pointer (loaded from variable 'task')

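Read together, the path events below say the following: consider_task_collect() calls task_collect_scan(); inside the inner queue_iterate loop a value is assigned to 'task' and task_reference(task) is called; task_reference() explicitly tests task == TASK_NULL and returns early, so the analyzer keeps a state in which 'task' is null; back in task_collect_scan() the next statements dereference task->map with no such test, which produces the report at line 1094. The stripped-down sketch below shows the shape the checker matches; every name and type in it is a placeholder, not a real Mach declaration. In the kernel itself 'task' comes from a queue of live tasks, so the null branch is presumably never taken on this path, but the checker cannot see that invariant.

    /* Minimal reproduction of the flagged pattern; placeholder names only. */
    struct map  { int placeholder; };
    struct task { int ref_count; struct map *map; };

    static void task_reference(struct task *task)
    {
        if (task == 0)          /* analyzer step [7]: assume 'task' is null */
            return;             /* analyzer step [8]: take the true branch  */
        task->ref_count++;
    }

    /* 'task' stands in for the value produced by queue_iterate. */
    static int task_collect_scan(struct task *task)
    {
        task_reference(task);   /* steps [6] and [9]: call and return       */
        return task->map != 0;  /* step [10]: 'task' may still be null here */
    }
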
Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993-1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: kern/task.c
28 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
29 * David Black
30 *
31 * Task management primitives implementation.
32 */
33
34#include <string.h>
35
36#include <mach/machine/vm_types.h>
37#include <mach/vm_param.h>
38#include <mach/task_info.h>
39#include <mach/task_special_ports.h>
40#include <ipc/ipc_space.h>
41#include <ipc/ipc_types.h>
42#include <kern/debug.h>
43#include <kern/task.h>
44#include <kern/thread.h>
45#include <kern/slab.h>
46#include <kern/kalloc.h>
47#include <kern/processor.h>
48#include <kern/sched_prim.h> /* for thread_wakeup */
49#include <kern/ipc_tt.h>
50#include <kern/syscall_emulation.h>
51#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
52#include <machine/machspl.h> /* for splsched */
53
54task_t kernel_task = TASK_NULL((task_t) 0);
55struct kmem_cache task_cache;
56
57void task_init(void)
58{
59 kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
60 NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
61
62 eml_init();
63 machine_task_module_init ();
64
65 /*
66 * Create the kernel task as the first task.
67 * Task_create must assign to kernel_task as a side effect,
68 * for other initialization. (:-()
69 */
70 (void) task_create(TASK_NULL((task_t) 0), FALSE((boolean_t) 0), &kernel_task);
71}
72
73kern_return_t task_create(
74 task_t parent_task,
75 boolean_t inherit_memory,
76 task_t *child_task) /* OUT */
77{
78 task_t new_task;
79 processor_set_t pset;
80#if FAST_TAS0
81 int i;
82#endif
83
84 new_task = (task_t) kmem_cache_alloc(&task_cache);
85 if (new_task == TASK_NULL((task_t) 0)) {
86 panic("task_create: no memory for task structure");
87 }
88
89 /* one ref for just being alive; one for our caller */
90 new_task->ref_count = 2;
91
92 if (child_task == &kernel_task) {
93 new_task->map = kernel_map;
94 } else if (inherit_memory) {
95 new_task->map = vm_map_fork(parent_task->map);
96 } else {
97 new_task->map = vm_map_create(pmap_create(0),
98 round_page(VM_MIN_ADDRESS)((vm_offset_t)((((vm_offset_t)((0))) + ((1 << 12)-1)) & ~((1 << 12)-1))),
99 trunc_page(VM_MAX_ADDRESS)((vm_offset_t)(((vm_offset_t)((0xc0000000UL))) & ~((1 << 12)-1))), TRUE((boolean_t) 1));
100 }
101
102 simple_lock_init(&new_task->lock);
103 queue_init(&new_task->thread_list)((&new_task->thread_list)->next = (&new_task->thread_list)->prev = &new_task->thread_list);
104 new_task->suspend_count = 0;
105 new_task->active = TRUE((boolean_t) 1);
106 new_task->user_stop_count = 0;
107 new_task->thread_count = 0;
108 new_task->faults = 0;
109 new_task->zero_fills = 0;
110 new_task->reactivations = 0;
111 new_task->pageins = 0;
112 new_task->cow_faults = 0;
113 new_task->messages_sent = 0;
114 new_task->messages_received = 0;
115
116 eml_task_reference(new_task, parent_task);
117
118 ipc_task_init(new_task, parent_task);
119 machine_task_init (new_task);
120
121 new_task->total_user_time.seconds = 0;
122 new_task->total_user_time.microseconds = 0;
123 new_task->total_system_time.seconds = 0;
124 new_task->total_system_time.microseconds = 0;
125
126 record_time_stamp (&new_task->creation_time);
127
128 if (parent_task != TASK_NULL((task_t) 0)) {
129 task_lock(parent_task);
130 pset = parent_task->processor_set;
131 if (!pset->active)
132 pset = &default_pset;
133 pset_reference(pset);
134 new_task->priority = parent_task->priority;
135 task_unlock(parent_task)((void)(&(parent_task)->lock));
136 }
137 else {
138 pset = &default_pset;
139 pset_reference(pset);
140 new_task->priority = BASEPRI_USER25;
141 }
142 pset_lock(pset);
143 pset_add_task(pset, new_task);
144 pset_unlock(pset)((void)(&(pset)->lock));
145
146 new_task->may_assign = TRUE((boolean_t) 1);
147 new_task->assign_active = FALSE((boolean_t) 0);
148
149#if MACH_PCSAMPLE1
150 new_task->pc_sample.buffer = 0;
151 new_task->pc_sample.seqno = 0;
152 new_task->pc_sample.sampletypes = 0;
153#endif /* MACH_PCSAMPLE */
154
155#if FAST_TAS0
156 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
157 if (inherit_memory) {
158 new_task->fast_tas_base[i] = parent_task->fast_tas_base[i];
159 new_task->fast_tas_end[i] = parent_task->fast_tas_end[i];
160 } else {
161 new_task->fast_tas_base[i] = (vm_offset_t)0;
162 new_task->fast_tas_end[i] = (vm_offset_t)0;
163 }
164 }
165#endif /* FAST_TAS */
166
167 ipc_task_enable(new_task);
168
169 *child_task = new_task;
170 return KERN_SUCCESS0;
171}
172
173/*
174 * task_deallocate:
175 *
176 * Give up a reference to the specified task and destroy it if there
177 * are no other references left. It is assumed that the current thread
178 * is never in this task.
179 */
180void task_deallocate(
181 task_t task)
182{
183 int c;
184 processor_set_t pset;
185
186 if (task == TASK_NULL((task_t) 0))
187 return;
188
189 task_lock(task);
190 c = --(task->ref_count);
191 task_unlock(task)((void)(&(task)->lock));
192 if (c != 0)
193 return;
194
195 machine_task_terminate (task);
196
197 eml_task_deallocate(task);
198
199 pset = task->processor_set;
200 pset_lock(pset);
201 pset_remove_task(pset,task);
202 pset_unlock(pset)((void)(&(pset)->lock));
203 pset_deallocate(pset);
204 vm_map_deallocate(task->map);
205 is_release(task->itk_space)ipc_space_release(task->itk_space);
206 kmem_cache_free(&task_cache, (vm_offset_t) task);
207}
208
209void task_reference(
210 task_t task)
211{
212 if (task == TASK_NULL((task_t) 0))
    [7] Assuming 'task' is equal to null
    [8] Taking true branch
213 return;
214
215 task_lock(task);
216 task->ref_count++;
217 task_unlock(task)((void)(&(task)->lock));
218}
219
220/*
221 * task_terminate:
222 *
223 * Terminate the specified task. See comments on thread_terminate
224 * (kern/thread.c) about problems with terminating the "current task."
225 */
226kern_return_t task_terminate(
227 task_t task)
228{
229 thread_t thread, cur_thread;
230 queue_head_t *list;
231 task_t cur_task;
232 spl_t s;
233
234 if (task == TASK_NULL((task_t) 0))
235 return KERN_INVALID_ARGUMENT4;
236
237 list = &task->thread_list;
238 cur_task = current_task()((active_threads[(0)])->task);
239 cur_thread = current_thread()(active_threads[(0)]);
240
241 /*
242 * Deactivate task so that it can't be terminated again,
243 * and so lengthy operations in progress will abort.
244 *
245 * If the current thread is in this task, remove it from
246 * the task's thread list to keep the thread-termination
247 * loop simple.
248 */
249 if (task == cur_task) {
250 task_lock(task);
251 if (!task->active) {
252 /*
253 * Task is already being terminated.
254 */
255 task_unlock(task)((void)(&(task)->lock));
256 return KERN_FAILURE5;
257 }
258 /*
259 * Make sure current thread is not being terminated.
260 */
261 s = splsched();
262 thread_lock(cur_thread);
263 if (!cur_thread->active) {
264 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
265 (void) splx(s);
266 task_unlock(task)((void)(&(task)->lock));
267 thread_terminate(cur_thread);
268 return KERN_FAILURE5;
269 }
270 task->active = FALSE((boolean_t) 0);
271 queue_remove(list, cur_thread, thread_t, thread_list){ queue_entry_t next, prev; next = (cur_thread)->thread_list.next; prev = (cur_thread)->thread_list.prev; if ((list) == next) (list)->prev = prev; else ((thread_t)next)->thread_list.prev = prev; if ((list) == prev) (list)->next = next; else ((thread_t)prev)->thread_list.next = next; };
272 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
273 (void) splx(s);
274 task_unlock(task)((void)(&(task)->lock));
275
276 /*
277 * Shut down this thread's ipc now because it must
278 * be left alone to terminate the task.
279 */
280 ipc_thread_disable(cur_thread);
281 ipc_thread_terminate(cur_thread);
282 }
283 else {
284 /*
285 * Lock both current and victim task to check for
286 * potential deadlock.
287 */
288 if ((vm_offset_t)task < (vm_offset_t)cur_task) {
289 task_lock(task);
290 task_lock(cur_task);
291 }
292 else {
293 task_lock(cur_task);
294 task_lock(task);
295 }
296 /*
297 * Check if current thread or task is being terminated.
298 */
299 s = splsched();
300 thread_lock(cur_thread);
301 if ((!cur_task->active) ||(!cur_thread->active)) {
302 /*
303 * Current task or thread is being terminated.
304 */
305 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
306 (void) splx(s);
307 task_unlock(task)((void)(&(task)->lock));
308 task_unlock(cur_task)((void)(&(cur_task)->lock));
309 thread_terminate(cur_thread);
310 return KERN_FAILURE5;
311 }
312 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
313 (void) splx(s);
314 task_unlock(cur_task)((void)(&(cur_task)->lock));
315
316 if (!task->active) {
317 /*
318 * Task is already being terminated.
319 */
320 task_unlock(task)((void)(&(task)->lock));
321 return KERN_FAILURE5;
322 }
323 task->active = FALSE((boolean_t) 0);
324 task_unlock(task)((void)(&(task)->lock));
325 }
326
327 /*
328 * Prevent further execution of the task. ipc_task_disable
329 * prevents further task operations via the task port.
330 * If this is the current task, the current thread will
331 * be left running.
332 */
333 ipc_task_disable(task);
334 (void) task_hold(task);
335 (void) task_dowait(task,TRUE((boolean_t) 1)); /* may block */
336
337 /*
338 * Terminate each thread in the task.
339 *
340 * The task_port is closed down, so no more thread_create
341 * operations can be done. Thread_force_terminate closes the
342 * thread port for each thread; when that is done, the
343 * thread will eventually disappear. Thus the loop will
344 * terminate. Call thread_force_terminate instead of
345 * thread_terminate to avoid deadlock checks. Need
346 * to call thread_block() inside loop because some other
347 * thread (e.g., the reaper) may have to run to get rid
348 * of all references to the thread; it won't vanish from
349 * the task's thread list until the last one is gone.
350 */
351 task_lock(task);
352 while (!queue_empty(list)(((list)) == (((list)->next)))) {
353 thread = (thread_t) queue_first(list)((list)->next);
354 thread_reference(thread);
355 task_unlock(task)((void)(&(task)->lock));
356 thread_force_terminate(thread);
357 thread_deallocate(thread);
358 thread_block((void (*)()) 0);
359 task_lock(task);
360 }
361 task_unlock(task)((void)(&(task)->lock));
362
363 /*
364 * Shut down IPC.
365 */
366 ipc_task_terminate(task);
367
368
369 /*
370 * Deallocate the task's reference to itself.
371 */
372 task_deallocate(task);
373
374 /*
375 * If the current thread is in this task, it has not yet
376 * been terminated (since it was removed from the task's
377 * thread-list). Put it back in the thread list (for
378 * completeness), and terminate it. Since it holds the
379 * last reference to the task, terminating it will deallocate
380 * the task.
381 */
382 if (cur_thread->task == task) {
383 task_lock(task);
384 s = splsched();
385 queue_enter(list, cur_thread, thread_t, thread_list){ queue_entry_t prev; prev = (list)->prev; if ((list) == prev) { (list)->next = (queue_entry_t) (cur_thread); } else { ((thread_t)prev)->thread_list.next = (queue_entry_t)(cur_thread); } (cur_thread)->thread_list.prev = prev; (cur_thread)->thread_list.next = list; (list)->prev = (queue_entry_t) cur_thread; };
386 (void) splx(s);
387 task_unlock(task)((void)(&(task)->lock));
388 (void) thread_terminate(cur_thread);
389 }
390
391 return KERN_SUCCESS0;
392}
393
394/*
395 * task_hold:
396 *
397 * Suspend execution of the specified task.
398 * This is a recursive-style suspension of the task, a count of
399 * suspends is maintained.
400 */
401kern_return_t task_hold(
402 task_t task)
403{
404 queue_head_t *list;
405 thread_t thread, cur_thread;
406
407 cur_thread = current_thread()(active_threads[(0)]);
408
409 task_lock(task);
410 if (!task->active) {
411 task_unlock(task)((void)(&(task)->lock));
412 return KERN_FAILURE5;
413 }
414
415 task->suspend_count++;
416
417 /*
418 * Iterate through all the threads and hold them.
419 * Do not hold the current thread if it is within the
420 * task.
421 */
422 list = &task->thread_list;
423 queue_iterate(list, thread, thread_t, thread_list)for ((thread) = (thread_t) ((list)->next); !(((list)) == ((queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread)->thread_list)->next)) {
424 if (thread != cur_thread)
425 thread_hold(thread);
426 }
427 task_unlock(task)((void)(&(task)->lock));
428 return KERN_SUCCESS0;
429}
430
431/*
432 * task_dowait:
433 *
434 * Wait until the task has really been suspended (all of the threads
435 * are stopped). Skip the current thread if it is within the task.
436 *
437 * If task is deactivated while waiting, return a failure code unless
438 * must_wait is true.
439 */
440kern_return_t task_dowait(
441 task_t task,
442 boolean_t must_wait)
443{
444 queue_head_t *list;
445 thread_t thread, cur_thread, prev_thread;
446 kern_return_t ret = KERN_SUCCESS0;
447
448 /*
449 * Iterate through all the threads.
450 * While waiting for each thread, we gain a reference to it
451 * to prevent it from going away on us. This guarantees
452 * that the "next" thread in the list will be a valid thread.
453 *
454 * We depend on the fact that if threads are created while
455 * we are looping through the threads, they will be held
456 * automatically. We don't care about threads that get
457 * deallocated along the way (the reference prevents it
458 * from happening to the thread we are working with).
459 *
460 * If the current thread is in the affected task, it is skipped.
461 *
462 * If the task is deactivated before we're done, and we don't
463 * have to wait for it (must_wait is FALSE), just bail out.
464 */
465 cur_thread = current_thread()(active_threads[(0)]);
466
467 list = &task->thread_list;
468 prev_thread = THREAD_NULL((thread_t) 0);
469 task_lock(task);
470 queue_iterate(list, thread, thread_t, thread_list)for ((thread) = (thread_t) ((list)->next); !(((list)) == ((queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread)->thread_list)->next)) {
471 if (!(task->active) && !(must_wait)) {
472 ret = KERN_FAILURE5;
473 break;
474 }
475 if (thread != cur_thread) {
476 thread_reference(thread);
477 task_unlock(task)((void)(&(task)->lock));
478 if (prev_thread != THREAD_NULL((thread_t) 0))
479 thread_deallocate(prev_thread);
480 /* may block */
481 (void) thread_dowait(thread, TRUE((boolean_t) 1)); /* may block */
482 prev_thread = thread;
483 task_lock(task);
484 }
485 }
486 task_unlock(task)((void)(&(task)->lock));
487 if (prev_thread != THREAD_NULL((thread_t) 0))
488 thread_deallocate(prev_thread); /* may block */
489 return ret;
490}
491
492kern_return_t task_release(
493 task_t task)
494{
495 queue_head_t *list;
496 thread_t thread, next;
497
498 task_lock(task);
499 if (!task->active) {
500 task_unlock(task)((void)(&(task)->lock));
501 return KERN_FAILURE5;
502 }
503
504 task->suspend_count--;
505
506 /*
507 * Iterate through all the threads and release them
508 */
509 list = &task->thread_list;
510 thread = (thread_t) queue_first(list)((list)->next);
511 while (!queue_end(list, (queue_entry_t) thread)((list) == ((queue_entry_t) thread))) {
512 next = (thread_t) queue_next(&thread->thread_list)((&thread->thread_list)->next);
513 thread_release(thread);
514 thread = next;
515 }
516 task_unlock(task)((void)(&(task)->lock));
517 return KERN_SUCCESS0;
518}
519
520kern_return_t task_threads(
521 task_t task,
522 thread_array_t *thread_list,
523 natural_t *count)
524{
525 unsigned int actual; /* this many threads */
526 thread_t thread;
527 thread_t *threads;
528 int i;
529
530 vm_size_t size, size_needed;
531 vm_offset_t addr;
532
533 if (task == TASK_NULL((task_t) 0))
534 return KERN_INVALID_ARGUMENT4;
535
536 size = 0; addr = 0;
537
538 for (;;) {
539 task_lock(task);
540 if (!task->active) {
541 task_unlock(task)((void)(&(task)->lock));
542 return KERN_FAILURE5;
543 }
544
545 actual = task->thread_count;
546
547 /* do we have the memory we need? */
548
549 size_needed = actual * sizeof(mach_port_t);
550 if (size_needed <= size)
551 break;
552
553 /* unlock the task and allocate more memory */
554 task_unlock(task)((void)(&(task)->lock));
555
556 if (size != 0)
557 kfree(addr, size);
558
559 assert(size_needed > 0)({ if (!(size_needed > 0)) Assert("size_needed > 0", "../kern/task.c", 559); });
560 size = size_needed;
561
562 addr = kalloc(size);
563 if (addr == 0)
564 return KERN_RESOURCE_SHORTAGE6;
565 }
566
567 /* OK, have memory and the task is locked & active */
568
569 threads = (thread_t *) addr;
570
571 for (i = 0, thread = (thread_t) queue_first(&task->thread_list)((&task->thread_list)->next);
572 i < actual;
573 i++, thread = (thread_t) queue_next(&thread->thread_list)((&thread->thread_list)->next)) {
574 /* take ref for convert_thread_to_port */
575 thread_reference(thread);
576 threads[i] = thread;
577 }
578 assert(queue_end(&task->thread_list, (queue_entry_t) thread))({ if (!(((&task->thread_list) == ((queue_entry_t) thread)))) Assert("queue_end(&task->thread_list, (queue_entry_t) thread)", "../kern/task.c", 578); });
579
580 /* can unlock task now that we've got the thread refs */
581 task_unlock(task)((void)(&(task)->lock));
582
583 if (actual == 0) {
584 /* no threads, so return null pointer and deallocate memory */
585
586 *thread_list = 0;
587 *count = 0;
588
589 if (size != 0)
590 kfree(addr, size);
591 } else {
592 /* if we allocated too much, must copy */
593
594 if (size_needed < size) {
595 vm_offset_t newaddr;
596
597 newaddr = kalloc(size_needed);
598 if (newaddr == 0) {
599 for (i = 0; i < actual; i++)
600 thread_deallocate(threads[i]);
601 kfree(addr, size);
602 return KERN_RESOURCE_SHORTAGE6;
603 }
604
605 memcpy((void *) newaddr, (void *) addr, size_needed);
606 kfree(addr, size);
607 threads = (thread_t *) newaddr;
608 }
609
610 *thread_list = (mach_port_t *) threads;
611 *count = actual;
612
613 /* do the conversion that Mig should handle */
614
615 for (i = 0; i < actual; i++)
616 ((ipc_port_t *) threads)[i] =
617 convert_thread_to_port(threads[i]);
618 }
619
620 return KERN_SUCCESS0;
621}
622
623kern_return_t task_suspend(
624 task_t task)
625{
626 boolean_t hold;
627
628 if (task == TASK_NULL((task_t) 0))
629 return KERN_INVALID_ARGUMENT4;
630
631 hold = FALSE((boolean_t) 0);
632 task_lock(task);
633 if ((task->user_stop_count)++ == 0)
634 hold = TRUE((boolean_t) 1);
635 task_unlock(task)((void)(&(task)->lock));
636
637 /*
638 * If the stop count was positive, the task is
639 * already stopped and we can exit.
640 */
641 if (!hold) {
642 return KERN_SUCCESS0;
643 }
644
645 /*
646 * Hold all of the threads in the task, and wait for
647 * them to stop. If the current thread is within
648 * this task, hold it separately so that all of the
649 * other threads can stop first.
650 */
651
652 if (task_hold(task) != KERN_SUCCESS0)
653 return KERN_FAILURE5;
654
655 if (task_dowait(task, FALSE((boolean_t) 0)) != KERN_SUCCESS0)
656 return KERN_FAILURE5;
657
658 if (current_task()((active_threads[(0)])->task) == task) {
659 spl_t s;
660
661 thread_hold(current_thread()(active_threads[(0)]));
662 /*
663 * We want to call thread_block on our way out,
664 * to stop running.
665 */
666 s = splsched();
667 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
668 (void) splx(s);
669 }
670
671 return KERN_SUCCESS0;
672}
673
674kern_return_t task_resume(
675 task_t task)
676{
677 boolean_t release;
678
679 if (task == TASK_NULL((task_t) 0))
680 return KERN_INVALID_ARGUMENT4;
681
682 release = FALSE((boolean_t) 0);
683 task_lock(task);
684 if (task->user_stop_count > 0) {
685 if (--(task->user_stop_count) == 0)
686 release = TRUE((boolean_t) 1);
687 }
688 else {
689 task_unlock(task)((void)(&(task)->lock));
690 return KERN_FAILURE5;
691 }
692 task_unlock(task)((void)(&(task)->lock));
693
694 /*
695 * Release the task if necessary.
696 */
697 if (release)
698 return task_release(task);
699
700 return KERN_SUCCESS0;
701}
702
703kern_return_t task_info(
704 task_t task,
705 int flavor,
706 task_info_t task_info_out, /* pointer to OUT array */
707 natural_t *task_info_count) /* IN/OUT */
708{
709 vm_map_t map;
710
711 if (task == TASK_NULL((task_t) 0))
712 return KERN_INVALID_ARGUMENT4;
713
714 switch (flavor) {
715 case TASK_BASIC_INFO1:
716 {
717 task_basic_info_t basic_info;
718
719 /* Allow *task_info_count to be two words smaller than
720 the usual amount, because creation_time is a new member
721 that some callers might not know about. */
722
723 if (*task_info_count < TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t)) - 2) {
724 return KERN_INVALID_ARGUMENT4;
725 }
726
727 basic_info = (task_basic_info_t) task_info_out;
728
729 map = (task == kernel_task) ? kernel_map : task->map;
730
731 basic_info->virtual_size = map->size;
732 basic_info->resident_size = pmap_resident_count(map->pmap)((map->pmap)->stats.resident_count)
733 * PAGE_SIZE(1 << 12);
734
735 task_lock(task);
736 basic_info->base_priority = task->priority;
737 basic_info->suspend_count = task->user_stop_count;
738 basic_info->user_time.seconds
739 = task->total_user_time.seconds;
740 basic_info->user_time.microseconds
741 = task->total_user_time.microseconds;
742 basic_info->system_time.seconds
743 = task->total_system_time.seconds;
744 basic_info->system_time.microseconds
745 = task->total_system_time.microseconds;
746 basic_info->creation_time = task->creation_time;
747 task_unlock(task)((void)(&(task)->lock));
748
749 if (*task_info_count > TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t)))
750 *task_info_count = TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t));
751 break;
752 }
753
754 case TASK_EVENTS_INFO2:
755 {
756 task_events_info_t event_info;
757
758 if (*task_info_count < TASK_EVENTS_INFO_COUNT(sizeof(task_events_info_data_t) / sizeof(natural_t))) {
759 return KERN_INVALID_ARGUMENT4;
760 }
761
762 event_info = (task_events_info_t) task_info_out;
763
764 task_lock(task);
765 event_info->faults = task->faults;
766 event_info->zero_fills = task->zero_fills;
767 event_info->reactivations = task->reactivations;
768 event_info->pageins = task->pageins;
769 event_info->cow_faults = task->cow_faults;
770 event_info->messages_sent = task->messages_sent;
771 event_info->messages_received = task->messages_received;
772 task_unlock(task)((void)(&(task)->lock));
773
774 *task_info_count = TASK_EVENTS_INFO_COUNT(sizeof(task_events_info_data_t) / sizeof(natural_t));
775 break;
776 }
777
778 case TASK_THREAD_TIMES_INFO3:
779 {
780 task_thread_times_info_t times_info;
781 thread_t thread;
782
783 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT(sizeof(task_thread_times_info_data_t) / sizeof(natural_t))) {
784 return KERN_INVALID_ARGUMENT4;
785 }
786
787 times_info = (task_thread_times_info_t) task_info_out;
788 times_info->user_time.seconds = 0;
789 times_info->user_time.microseconds = 0;
790 times_info->system_time.seconds = 0;
791 times_info->system_time.microseconds = 0;
792
793 task_lock(task);
794 queue_iterate(&task->thread_list, thread,for ((thread) = (thread_t) ((&task->thread_list)->next); !(((&task->thread_list)) == ((queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread)->thread_list)->next))
795 thread_t, thread_list)for ((thread) = (thread_t) ((&task->thread_list)->next); !(((&task->thread_list)) == ((queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread)->thread_list)->next))
796 {
797 time_value_t user_time, system_time;
798 spl_t s;
799
800 s = splsched();
801 thread_lock(thread);
802
803 thread_read_times(thread, &user_time, &system_time);
804
805 thread_unlock(thread)((void)(&(thread)->lock));
806 splx(s);
807
808 time_value_add(&times_info->user_time, &user_time){ (&times_info->user_time)->microseconds += (&user_time)->microseconds; (&times_info->user_time)->seconds += (&user_time)->seconds; if ((&times_info->user_time)->microseconds >= (1000000)) { (&times_info->user_time)->microseconds -= (1000000); (&times_info->user_time)->seconds++; } };
809 time_value_add(&times_info->system_time, &system_time){ (&times_info->system_time)->microseconds += (&system_time)->microseconds; (&times_info->system_time)->seconds += (&system_time)->seconds; if ((&times_info->system_time)->microseconds >= (1000000)) { (&times_info->system_time)->microseconds -= (1000000); (&times_info->system_time)->seconds++; } };
810 }
811 task_unlock(task)((void)(&(task)->lock));
812
813 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT(sizeof(task_thread_times_info_data_t) / sizeof(natural_t));
814 break;
815 }
816
817 default:
818 return KERN_INVALID_ARGUMENT4;
819 }
820
821 return KERN_SUCCESS0;
822}
823
824#if MACH_HOST0
825/*
826 * task_assign:
827 *
828 * Change the assigned processor set for the task
829 */
830kern_return_t
831task_assign(
832 task_t task,
833 processor_set_t new_pset,
834 boolean_t assign_threads)
835{
836 kern_return_t ret = KERN_SUCCESS0;
837 thread_t thread, prev_thread;
838 queue_head_t *list;
839 processor_set_t pset;
840
841 if (task == TASK_NULL((task_t) 0) || new_pset == PROCESSOR_SET_NULL((processor_set_t) 0)) {
842 return KERN_INVALID_ARGUMENT4;
843 }
844
845 /*
846 * Freeze task`s assignment. Prelude to assigning
847 * task. Only one freeze may be held per task.
848 */
849
850 task_lock(task);
851 while (task->may_assign == FALSE((boolean_t) 0)) {
852 task->assign_active = TRUE((boolean_t) 1);
853 assert_wait((event_t)&task->assign_active, TRUE((boolean_t) 1));
854 task_unlock(task)((void)(&(task)->lock));
855 thread_block((void (*)()) 0);
856 task_lock(task);
857 }
858
859 /*
860 * Avoid work if task already in this processor set.
861 */
862 if (task->processor_set == new_pset) {
863 /*
864 * No need for task->assign_active wakeup:
865 * task->may_assign is still TRUE.
866 */
867 task_unlock(task)((void)(&(task)->lock));
868 return KERN_SUCCESS0;
869 }
870
871 task->may_assign = FALSE((boolean_t) 0);
872 task_unlock(task)((void)(&(task)->lock));
873
874 /*
875 * Safe to get the task`s pset: it cannot change while
876 * task is frozen.
877 */
878 pset = task->processor_set;
879
880 /*
881 * Lock both psets now. Use ordering to avoid deadlock.
882 */
883 Restart:
884 if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
885 pset_lock(pset);
886 pset_lock(new_pset);
887 }
888 else {
889 pset_lock(new_pset);
890 pset_lock(pset);
891 }
892
893 /*
894 * Check if new_pset is ok to assign to. If not,
895 * reassign to default_pset.
896 */
897 if (!new_pset->active) {
898 pset_unlock(pset)((void)(&(pset)->lock));
899 pset_unlock(new_pset)((void)(&(new_pset)->lock));
900 new_pset = &default_pset;
901 goto Restart;
902 }
903
904 pset_reference(new_pset);
905
906 /*
907 * Now grab the task lock and move the task.
908 */
909
910 task_lock(task);
911 pset_remove_task(pset, task);
912 pset_add_task(new_pset, task);
913
914 pset_unlock(pset)((void)(&(pset)->lock));
915 pset_unlock(new_pset)((void)(&(new_pset)->lock));
916
917 if (assign_threads == FALSE((boolean_t) 0)) {
918 /*
919 * We leave existing threads at their
920 * old assignments. Unfreeze task`s
921 * assignment.
922 */
923 task->may_assign = TRUE((boolean_t) 1);
924 if (task->assign_active) {
925 task->assign_active = FALSE((boolean_t) 0);
926 thread_wakeup((event_t) &task->assign_active)thread_wakeup_prim(((event_t) &task->assign_active), ((boolean_t) 0), 0);
927 }
928 task_unlock(task)((void)(&(task)->lock));
929 pset_deallocate(pset);
930 return KERN_SUCCESS0;
931 }
932
933 /*
934 * If current thread is in task, freeze its assignment.
935 */
936 if (current_thread()(active_threads[(0)])->task == task) {
937 task_unlock(task)((void)(&(task)->lock));
938 thread_freeze(current_thread()(active_threads[(0)]));
939 task_lock(task);
940 }
941
942 /*
943 * Iterate down the thread list reassigning all the threads.
944 * New threads pick up task's new processor set automatically.
945 * Do current thread last because new pset may be empty.
946 */
947 list = &task->thread_list;
948 prev_thread = THREAD_NULL((thread_t) 0);
949 queue_iterate(list, thread, thread_t, thread_list)for ((thread) = (thread_t) ((list)->next); !(((list)) == ((queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread)->thread_list)->next)) {
950 if (!(task->active)) {
951 ret = KERN_FAILURE5;
952 break;
953 }
954 if (thread != current_thread()(active_threads[(0)])) {
955 thread_reference(thread);
956 task_unlock(task)((void)(&(task)->lock));
957 if (prev_thread != THREAD_NULL((thread_t) 0))
958 thread_deallocate(prev_thread); /* may block */
959 thread_assign(thread,new_pset); /* may block */
960 prev_thread = thread;
961 task_lock(task);
962 }
963 }
964
965 /*
966 * Done, wakeup anyone waiting for us.
967 */
968 task->may_assign = TRUE((boolean_t) 1);
969 if (task->assign_active) {
970 task->assign_active = FALSE((boolean_t) 0);
971 thread_wakeup((event_t)&task->assign_active)thread_wakeup_prim(((event_t)&task->assign_active), ((boolean_t) 0), 0);
972 }
973 task_unlock(task)((void)(&(task)->lock));
974 if (prev_thread != THREAD_NULL((thread_t) 0))
975 thread_deallocate(prev_thread); /* may block */
976
977 /*
978 * Finish assignment of current thread.
979 */
980 if (current_thread()(active_threads[(0)])->task == task)
981 thread_doassign(current_thread()(active_threads[(0)]), new_pset, TRUE((boolean_t) 1));
982
983 pset_deallocate(pset);
984
985 return ret;
986}
987#else /* MACH_HOST */
988/*
989 * task_assign:
990 *
991 * Change the assigned processor set for the task
992 */
993kern_return_t
994task_assign(
995 task_t task,
996 processor_set_t new_pset,
997 boolean_t assign_threads)
998{
999 return KERN_FAILURE5;
1000}
1001#endif /* MACH_HOST */
1002
1003
1004/*
1005 * task_assign_default:
1006 *
1007 * Version of task_assign to assign to default processor set.
1008 */
1009kern_return_t
1010task_assign_default(
1011 task_t task,
1012 boolean_t assign_threads)
1013{
1014 return task_assign(task, &default_pset, assign_threads);
1015}
1016
1017/*
1018 * task_get_assignment
1019 *
1020 * Return name of processor set that task is assigned to.
1021 */
1022kern_return_t task_get_assignment(
1023 task_t task,
1024 processor_set_t *pset)
1025{
1026 if (!task->active)
1027 return KERN_FAILURE5;
1028
1029 *pset = task->processor_set;
1030 pset_reference(*pset);
1031 return KERN_SUCCESS0;
1032}
1033
1034/*
1035 * task_priority
1036 *
1037 * Set priority of task; used only for newly created threads.
1038 * Optionally change priorities of threads.
1039 */
1040kern_return_t
1041task_priority(
1042 task_t task,
1043 int priority,
1044 boolean_t change_threads)
1045{
1046 kern_return_t ret = KERN_SUCCESS0;
1047
1048 if (task == TASK_NULL((task_t) 0) || invalid_pri(priority)(((priority) < 0) || ((priority) >= 50)))
1049 return KERN_INVALID_ARGUMENT4;
1050
1051 task_lock(task);
1052 task->priority = priority;
1053
1054 if (change_threads) {
1055 thread_t thread;
1056 queue_head_t *list;
1057
1058 list = &task->thread_list;
1059 queue_iterate(list, thread, thread_t, thread_list)for ((thread) = (thread_t) ((list)->next); !(((list)) == ((queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread)->thread_list)->next)) {
1060 if (thread_priority(thread, priority, FALSE((boolean_t) 0))
1061 != KERN_SUCCESS0)
1062 ret = KERN_FAILURE5;
1063 }
1064 }
1065
1066 task_unlock(task)((void)(&(task)->lock));
1067 return ret;
1068}
1069
1070/*
1071 * task_collect_scan:
1072 *
1073 * Attempt to free resources owned by tasks.
1074 */
1075
1076void task_collect_scan(void)
1077{
1078 task_t task, prev_task;
1079 processor_set_t pset, prev_pset;
1080
1081 prev_task = TASK_NULL((task_t) 0);
1082 prev_pset = PROCESSOR_SET_NULL((processor_set_t) 0);
1083
1084 simple_lock(&all_psets_lock);
1085 queue_iterate(&all_psets, pset, processor_set_t, all_psets)for ((pset) = (processor_set_t) ((&all_psets)->next); !(((&all_psets)) == ((queue_entry_t)(pset))); (pset) = (processor_set_t) ((&(pset)->all_psets)->next)) {
1086 pset_lock(pset);
1087 queue_iterate(&pset->tasks, task, task_t, pset_tasks)for ((task) = (task_t) ((&pset->tasks)->next); !(((&pset->tasks)) == ((queue_entry_t)(task))); (task) = (task_t) ((&(task)->pset_tasks)->next)) {
    [5] Within the expansion of the macro 'queue_iterate':
        [a] Value assigned to 'task'
1088 task_reference(task);
    [6] Calling 'task_reference'
    [9] Returning from 'task_reference'
1089 pset_reference(pset);
1090 pset_unlock(pset)((void)(&(pset)->lock));
1091 simple_unlock(&all_psets_lock)((void)(&all_psets_lock));
1092
1093 machine_task_collect (task);
1094 pmap_collect(task->map->pmap);
    [10] Access to field 'map' results in a dereference of a null pointer (loaded from variable 'task')
1095
1096 if (prev_task != TASK_NULL((task_t) 0))
1097 task_deallocate(prev_task);
1098 prev_task = task;
1099
1100 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
1101 pset_deallocate(prev_pset);
1102 prev_pset = pset;
1103
1104 simple_lock(&all_psets_lock);
1105 pset_lock(pset);
1106 }
1107 pset_unlock(pset)((void)(&(pset)->lock));
1108 }
1109 simple_unlock(&all_psets_lock)((void)(&all_psets_lock));
1110
1111 if (prev_task != TASK_NULL((task_t) 0))
1112 task_deallocate(prev_task);
1113 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
1114 pset_deallocate(prev_pset);
1115}
1116
1117boolean_t task_collect_allowed = TRUE((boolean_t) 1);
1118unsigned task_collect_last_tick = 0;
1119unsigned task_collect_max_rate = 0; /* in ticks */
1120
1121/*
1122 * consider_task_collect:
1123 *
1124 * Called by the pageout daemon when the system needs more free pages.
1125 */
1126
1127void consider_task_collect(void)
1128{
1129 /*
1130 * By default, don't attempt task collection more frequently
1131 * than once a second.
1132 */
1133
1134 if (task_collect_max_rate == 0)
    [1] Assuming 'task_collect_max_rate' is not equal to 0
    [2] Taking false branch
1135 task_collect_max_rate = hz;
1136
1137 if (task_collect_allowed &&
    [3] Taking true branch
1138 (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
1139 task_collect_last_tick = sched_tick;
1140 task_collect_scan();
    [4] Calling 'task_collect_scan'
1141 }
1142}
1143
1144kern_return_t
1145task_ras_control(
1146 task_t task,
1147 vm_offset_t pc,
1148 vm_offset_t endpc,
1149 int flavor)
1150{
1151 kern_return_t ret = KERN_FAILURE5;
1152
1153#if FAST_TAS0
1154 int i;
1155
1156 ret = KERN_SUCCESS0;
1157 task_lock(task);
1158 switch (flavor) {
1159 case TASK_RAS_CONTROL_PURGE_ALL0: /* remove all RAS */
1160 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1161 task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1162 }
1163 break;
1164 case TASK_RAS_CONTROL_PURGE_ONE1: /* remove this RAS, collapse remaining */
1165 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1166 if ( (task->fast_tas_base[i] == pc)
1167 && (task->fast_tas_end[i] == endpc)) {
1168 while (i < TASK_FAST_TAS_NRAS-1) {
1169 task->fast_tas_base[i] = task->fast_tas_base[i+1];
1170 task->fast_tas_end[i] = task->fast_tas_end[i+1];
1171 i++;
1172 }
1173 task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0;
1174 task->fast_tas_end[TASK_FAST_TAS_NRAS-1] = 0;
1175 break;
1176 }
1177 }
1178 if (i == TASK_FAST_TAS_NRAS) {
1179 ret = KERN_INVALID_ADDRESS1;
1180 }
1181 break;
1182 case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE2:
1183 /* remove all RAS an install this RAS */
1184 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1185 task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1186 }
1187 /* FALL THROUGH */
1188 case TASK_RAS_CONTROL_INSTALL_ONE3: /* install this RAS */
1189 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1190 if ( (task->fast_tas_base[i] == pc)
1191 && (task->fast_tas_end[i] == endpc)) {
1192 /* already installed */
1193 break;
1194 }
1195 if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)){
1196 task->fast_tas_base[i] = pc;
1197 task->fast_tas_end[i] = endpc;
1198 break;
1199 }
1200 }
1201 if (i == TASK_FAST_TAS_NRAS) {
1202 ret = KERN_RESOURCE_SHORTAGE6;
1203 }
1204 break;
1205 default: ret = KERN_INVALID_VALUE18;
1206 break;
1207 }
1208 task_unlock(task)((void)(&(task)->lock));
1209#endif /* FAST_TAS */
1210 return ret;
1211}
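
Editorial note on the finding: within task_collect_scan() the value of 'task' is produced by queue_iterate over pset->tasks, which only ever yields the task structures linked into that queue, so the null state the checker carries out of task_reference() appears infeasible here and the report is most likely a false positive. If one nevertheless wanted to make that invariant visible to the analyzer, a conventional approach is an assertion ahead of the dereference. The fragment below is only a sketch of that idea against the loop shown above, not a patch from the real tree; the added assert is hypothetical and follows the assert() style used elsewhere in kern/task.c.

    queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
        assert(task != TASK_NULL);      /* hypothetical guard: the queue never hands back a null task */
        task_reference(task);
        pset_reference(pset);
        pset_unlock(pset);
        simple_unlock(&all_psets_lock);

        machine_task_collect (task);
        pmap_collect(task->map->pmap);  /* the line 1094 dereference, now preceded by the assertion */

        /* remainder of the loop body unchanged */
    }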