Bug Summary

File: obj-scan-build/../kern/task.c
Location: line 1146, column 17
Description: Access to field 'map' results in a dereference of a null pointer (loaded from variable 'task')
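
The path behind this report (the numbered events in the listing below) starts in consider_task_collect(), enters task_collect_scan(), and only becomes a null-pointer path once the analyzer inlines task_reference(): that function's explicit TASK_NULL check lets the analyzer keep a "task is null" branch alive and carry it to the unchecked dereference in pmap_collect(task->map->pmap). The sketch below is hypothetical code, not taken from GNU Mach (the names collect_one, collect_one_checked and the struct fields are invented for illustration); it reproduces the same pattern in isolation and shows how asserting the invariant removes the null branch. Whether an assertion is the right resolution for kern/task.c is left to the maintainers; tasks queued on a processor set are presumably never TASK_NULL, but that is an assumption here, not something this report states.

/*
 * Standalone illustration of the reported pattern (hypothetical code,
 * not taken from GNU Mach).  A callee that tolerates a null argument
 * keeps the "pointer == NULL" branch alive in the caller, so a later
 * unchecked dereference is reported.  Check with: clang --analyze example.c
 */
#include <assert.h>
#include <stddef.h>

struct map  { int resident_pages; };
struct task { int ref_count; struct map *map; };

static void task_reference(struct task *task)
{
        if (task == NULL)       /* "Assuming 'task' is equal to null" */
                return;         /* "Taking true branch"               */
        task->ref_count++;
}

static void collect_one(struct task *task)
{
        task_reference(task);           /* the null branch survives inlining      */
        task->map->resident_pages = 0;  /* reported: possible null dereference    */
}

static void collect_one_checked(struct task *task)
{
        assert(task != NULL);           /* states the invariant; the analyzer
                                           drops the null branch here             */
        task_reference(task);
        task->map->resident_pages = 0;  /* not reported on this path              */
}

int main(void)
{
        struct map  m = { 0 };
        struct task t = { 1, &m };

        collect_one(&t);                /* fine at run time; the report is about
                                           the feasible path, not this call       */
        collect_one_checked(&t);
        return 0;
}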

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993-1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: kern/task.c
28 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
29 * David Black
30 *
31 * Task management primitives implementation.
32 */
33
34#include <string.h>
35
36#include <mach/machine/vm_types.h>
37#include <mach/vm_param.h>
38#include <mach/task_info.h>
39#include <mach/task_special_ports.h>
40#include <mach_debug/mach_debug_types.h>
41#include <ipc/ipc_space.h>
42#include <ipc/ipc_types.h>
43#include <kern/debug.h>
44#include <kern/task.h>
45#include <kern/thread.h>
46#include <kern/slab.h>
47#include <kern/kalloc.h>
48#include <kern/processor.h>
49#include <kern/printf.h>
50#include <kern/sched_prim.h> /* for thread_wakeup */
51#include <kern/ipc_tt.h>
52#include <kern/syscall_emulation.h>
53#include <kern/task_notify.user.h>
54#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
55#include <machine/machspl.h> /* for splsched */
56
57task_t kernel_task = TASK_NULL((task_t) 0);
58struct kmem_cache task_cache;
59
60/* Where to send notifications about newly created tasks. */
61ipc_port_t new_task_notification = NULL((void *) 0);
62
63void task_init(void)
64{
65 kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
66 NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
67
68 eml_init();
69 machine_task_module_init ();
70
71 /*
72 * Create the kernel task as the first task.
73 * Task_create must assign to kernel_task as a side effect,
74 * for other initialization. (:-()
75 */
76 (void) task_create(TASK_NULL((task_t) 0), FALSE((boolean_t) 0), &kernel_task);
77 (void) task_set_name(kernel_task, "gnumach");
78}
79
80kern_return_t task_create(
81 task_t parent_task,
82 boolean_t inherit_memory,
83 task_t *child_task) /* OUT */
84{
85 task_t new_task;
86 processor_set_t pset;
87#if FAST_TAS0
88 int i;
89#endif
90
91 new_task = (task_t) kmem_cache_alloc(&task_cache);
92 if (new_task == TASK_NULL((task_t) 0)) {
93 panic("task_create: no memory for task structure");
94 }
95
96 /* one ref for just being alive; one for our caller */
97 new_task->ref_count = 2;
98
99 if (child_task == &kernel_task) {
100 new_task->map = kernel_map;
101 } else if (inherit_memory) {
102 new_task->map = vm_map_fork(parent_task->map);
103 } else {
104 new_task->map = vm_map_create(pmap_create(0),
105 round_page(VM_MIN_ADDRESS)((vm_offset_t)((((vm_offset_t)((0))) + ((1 << 12)-1)) &
~((1 << 12)-1)))
,
106 trunc_page(VM_MAX_ADDRESS)((vm_offset_t)(((vm_offset_t)((0xc0000000UL))) & ~((1 <<
12)-1)))
, TRUE((boolean_t) 1));
107 }
108
109 simple_lock_init(&new_task->lock);
110 queue_init(&new_task->thread_list)((&new_task->thread_list)->next = (&new_task->
thread_list)->prev = &new_task->thread_list)
;
111 new_task->suspend_count = 0;
112 new_task->active = TRUE((boolean_t) 1);
113 new_task->user_stop_count = 0;
114 new_task->thread_count = 0;
115 new_task->faults = 0;
116 new_task->zero_fills = 0;
117 new_task->reactivations = 0;
118 new_task->pageins = 0;
119 new_task->cow_faults = 0;
120 new_task->messages_sent = 0;
121 new_task->messages_received = 0;
122
123 eml_task_reference(new_task, parent_task);
124
125 ipc_task_init(new_task, parent_task);
126 machine_task_init (new_task);
127
128 new_task->total_user_time.seconds = 0;
129 new_task->total_user_time.microseconds = 0;
130 new_task->total_system_time.seconds = 0;
131 new_task->total_system_time.microseconds = 0;
132
133 record_time_stamp (&new_task->creation_time);
134
135 if (parent_task != TASK_NULL((task_t) 0)) {
136 task_lock(parent_task);
137 pset = parent_task->processor_set;
138 if (!pset->active)
139 pset = &default_pset;
140 pset_reference(pset);
141 new_task->priority = parent_task->priority;
142 task_unlock(parent_task)((void)(&(parent_task)->lock));
143 }
144 else {
145 pset = &default_pset;
146 pset_reference(pset);
147 new_task->priority = BASEPRI_USER25;
148 }
149 pset_lock(pset);
150 pset_add_task(pset, new_task);
151 pset_unlock(pset)((void)(&(pset)->lock));
152
153 new_task->may_assign = TRUE((boolean_t) 1);
154 new_task->assign_active = FALSE((boolean_t) 0);
155
156#if MACH_PCSAMPLE1
157 new_task->pc_sample.buffer = 0;
158 new_task->pc_sample.seqno = 0;
159 new_task->pc_sample.sampletypes = 0;
160#endif /* MACH_PCSAMPLE */
161
162#if FAST_TAS0
163 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
164 if (inherit_memory) {
165 new_task->fast_tas_base[i] = parent_task->fast_tas_base[i];
166 new_task->fast_tas_end[i] = parent_task->fast_tas_end[i];
167 } else {
168 new_task->fast_tas_base[i] = (vm_offset_t)0;
169 new_task->fast_tas_end[i] = (vm_offset_t)0;
170 }
171 }
172#endif /* FAST_TAS */
173
174 snprintf (new_task->name, sizeof new_task->name, "%p", new_task);
175
176 if (new_task_notification != NULL((void *) 0)) {
177 task_reference (new_task);
178 task_reference (parent_task);
179 mach_notify_new_task (new_task_notification,
180 convert_task_to_port (new_task),
181 convert_task_to_port (parent_task));
182 }
183
184 ipc_task_enable(new_task);
185
186 *child_task = new_task;
187 return KERN_SUCCESS0;
188}
189
190/*
191 * task_deallocate:
192 *
193 * Give up a reference to the specified task and destroy it if there
194 * are no other references left. It is assumed that the current thread
195 * is never in this task.
196 */
197void task_deallocate(
198 task_t task)
199{
200 int c;
201 processor_set_t pset;
202
203 if (task == TASK_NULL((task_t) 0))
204 return;
205
206 task_lock(task);
207 c = --(task->ref_count);
208 task_unlock(task)((void)(&(task)->lock));
209 if (c != 0)
210 return;
211
212 machine_task_terminate (task);
213
214 eml_task_deallocate(task);
215
216 pset = task->processor_set;
217 pset_lock(pset);
218 pset_remove_task(pset,task);
219 pset_unlock(pset)((void)(&(pset)->lock));
220 pset_deallocate(pset);
221 vm_map_deallocate(task->map);
222 is_release(task->itk_space)ipc_space_release(task->itk_space);
223 kmem_cache_free(&task_cache, (vm_offset_t) task);
224}
225
226void task_reference(
227 task_t task)
228{
229 if (task == TASK_NULL((task_t) 0))
[7] Assuming 'task' is equal to null
[8] Taking true branch
230 return;
231
232 task_lock(task);
233 task->ref_count++;
234 task_unlock(task)((void)(&(task)->lock));
235}
236
237/*
238 * task_terminate:
239 *
240 * Terminate the specified task. See comments on thread_terminate
241 * (kern/thread.c) about problems with terminating the "current task."
242 */
243kern_return_t task_terminate(
244 task_t task)
245{
246 thread_t thread, cur_thread;
247 queue_head_t *list;
248 task_t cur_task;
249 spl_t s;
250
251 if (task == TASK_NULL((task_t) 0))
252 return KERN_INVALID_ARGUMENT4;
253
254 list = &task->thread_list;
255 cur_task = current_task()((active_threads[(0)])->task);
256 cur_thread = current_thread()(active_threads[(0)]);
257
258 /*
259 * Deactivate task so that it can't be terminated again,
260 * and so lengthy operations in progress will abort.
261 *
262 * If the current thread is in this task, remove it from
263 * the task's thread list to keep the thread-termination
264 * loop simple.
265 */
266 if (task == cur_task) {
267 task_lock(task);
268 if (!task->active) {
269 /*
270 * Task is already being terminated.
271 */
272 task_unlock(task)((void)(&(task)->lock));
273 return KERN_FAILURE5;
274 }
275 /*
276 * Make sure current thread is not being terminated.
277 */
278 s = splsched();
279 thread_lock(cur_thread);
280 if (!cur_thread->active) {
281 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
282 (void) splx(s);
283 task_unlock(task)((void)(&(task)->lock));
284 thread_terminate(cur_thread);
285 return KERN_FAILURE5;
286 }
287 task_hold_locked(task);
288 task->active = FALSE((boolean_t) 0);
289 queue_remove(list, cur_thread, thread_t, thread_list){ queue_entry_t next, prev; next = (cur_thread)->thread_list
.next; prev = (cur_thread)->thread_list.prev; if ((list) ==
next) (list)->prev = prev; else ((thread_t)next)->thread_list
.prev = prev; if ((list) == prev) (list)->next = next; else
((thread_t)prev)->thread_list.next = next; }
;
290 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
291 (void) splx(s);
292 task_unlock(task)((void)(&(task)->lock));
293
294 /*
295 * Shut down this thread's ipc now because it must
296 * be left alone to terminate the task.
297 */
298 ipc_thread_disable(cur_thread);
299 ipc_thread_terminate(cur_thread);
300 }
301 else {
302 /*
303 * Lock both current and victim task to check for
304 * potential deadlock.
305 */
306 if ((vm_offset_t)task < (vm_offset_t)cur_task) {
307 task_lock(task);
308 task_lock(cur_task);
309 }
310 else {
311 task_lock(cur_task);
312 task_lock(task);
313 }
314 /*
315 * Check if current thread or task is being terminated.
316 */
317 s = splsched();
318 thread_lock(cur_thread);
319 if ((!cur_task->active) ||(!cur_thread->active)) {
320 /*
321 * Current task or thread is being terminated.
322 */
323 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
324 (void) splx(s);
325 task_unlock(task)((void)(&(task)->lock));
326 task_unlock(cur_task)((void)(&(cur_task)->lock));
327 thread_terminate(cur_thread);
328 return KERN_FAILURE5;
329 }
330 thread_unlock(cur_thread)((void)(&(cur_thread)->lock));
331 (void) splx(s);
332 task_unlock(cur_task)((void)(&(cur_task)->lock));
333
334 if (!task->active) {
335 /*
336 * Task is already being terminated.
337 */
338 task_unlock(task)((void)(&(task)->lock));
339 return KERN_FAILURE5;
340 }
341 task_hold_locked(task);
342 task->active = FALSE((boolean_t) 0);
343 task_unlock(task)((void)(&(task)->lock));
344 }
345
346 /*
347 * Prevent further execution of the task. ipc_task_disable
348 * prevents further task operations via the task port.
349 * If this is the current task, the current thread will
350 * be left running.
351 */
352 (void) task_dowait(task,TRUE((boolean_t) 1)); /* may block */
353 ipc_task_disable(task);
354
355 /*
356 * Terminate each thread in the task.
357 *
358 * The task_port is closed down, so no more thread_create
359 * operations can be done. Thread_force_terminate closes the
360 * thread port for each thread; when that is done, the
361 * thread will eventually disappear. Thus the loop will
362 * terminate. Call thread_force_terminate instead of
363 * thread_terminate to avoid deadlock checks. Need
364 * to call thread_block() inside loop because some other
365 * thread (e.g., the reaper) may have to run to get rid
366 * of all references to the thread; it won't vanish from
367 * the task's thread list until the last one is gone.
368 */
369 task_lock(task);
370 while (!queue_empty(list)(((list)) == (((list)->next)))) {
371 thread = (thread_t) queue_first(list)((list)->next);
372 thread_reference(thread);
373 task_unlock(task)((void)(&(task)->lock));
374 thread_force_terminate(thread);
375 thread_deallocate(thread);
376 thread_block((void (*)()) 0);
377 task_lock(task);
378 }
379 task_unlock(task)((void)(&(task)->lock));
380
381 /*
382 * Shut down IPC.
383 */
384 ipc_task_terminate(task);
385
386
387 /*
388 * Deallocate the task's reference to itself.
389 */
390 task_deallocate(task);
391
392 /*
393 * If the current thread is in this task, it has not yet
394 * been terminated (since it was removed from the task's
395 * thread-list). Put it back in the thread list (for
396 * completeness), and terminate it. Since it holds the
397 * last reference to the task, terminating it will deallocate
398 * the task.
399 */
400 if (cur_thread->task == task) {
401 task_lock(task);
402 s = splsched();
403 queue_enter(list, cur_thread, thread_t, thread_list){ queue_entry_t prev; prev = (list)->prev; if ((list) == prev
) { (list)->next = (queue_entry_t) (cur_thread); } else { (
(thread_t)prev)->thread_list.next = (queue_entry_t)(cur_thread
); } (cur_thread)->thread_list.prev = prev; (cur_thread)->
thread_list.next = list; (list)->prev = (queue_entry_t) cur_thread
; }
;
404 (void) splx(s);
405 task_unlock(task)((void)(&(task)->lock));
406 (void) thread_terminate(cur_thread);
407 }
408
409 return KERN_SUCCESS0;
410}
411
412/*
413 * task_hold:
414 *
415 * Suspend execution of the specified task.
416 * This is a recursive-style suspension of the task, a count of
417 * suspends is maintained.
418 *
419 * CONDITIONS: the task is locked and active.
420 */
421void task_hold_locked(
422 task_t task)
423{
424 queue_head_t *list;
425 thread_t thread, cur_thread;
426
427 assert(task->active)({ if (!(task->active)) Assert("task->active", "../kern/task.c"
, 427); })
;
428
429 cur_thread = current_thread()(active_threads[(0)]);
430
431 task->suspend_count++;
432
433 /*
434 * Iterate through all the threads and hold them.
435 * Do not hold the current thread if it is within the
436 * task.
437 */
438 list = &task->thread_list;
439 queue_iterate(list, thread, thread_t, thread_list)for ((thread) = (thread_t) ((list)->next); !(((list)) == (
(queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread
)->thread_list)->next))
{
440 if (thread != cur_thread)
441 thread_hold(thread);
442 }
443}
444
445/*
446 * task_hold:
447 *
448 * Suspend execution of the specified task.
449 * This is a recursive-style suspension of the task, a count of
450 * suspends is maintained.
451 */
452kern_return_t task_hold(
453 task_t task)
454{
455 task_lock(task);
456 if (!task->active) {
457 task_unlock(task)((void)(&(task)->lock));
458 return KERN_FAILURE5;
459 }
460
461 task_hold_locked(task);
462
463 task_unlock(task)((void)(&(task)->lock));
464 return KERN_SUCCESS0;
465}
466
467/*
468 * task_dowait:
469 *
470 * Wait until the task has really been suspended (all of the threads
471 * are stopped). Skip the current thread if it is within the task.
472 *
473 * If task is deactivated while waiting, return a failure code unless
474 * must_wait is true.
475 */
476kern_return_t task_dowait(
477 task_t task,
478 boolean_t must_wait)
479{
480 queue_head_t *list;
481 thread_t thread, cur_thread, prev_thread;
482 kern_return_t ret = KERN_SUCCESS0;
483
484 /*
485 * Iterate through all the threads.
486 * While waiting for each thread, we gain a reference to it
487 * to prevent it from going away on us. This guarantees
488 * that the "next" thread in the list will be a valid thread.
489 *
490 * We depend on the fact that if threads are created while
491 * we are looping through the threads, they will be held
492 * automatically. We don't care about threads that get
493 * deallocated along the way (the reference prevents it
494 * from happening to the thread we are working with).
495 *
496 * If the current thread is in the affected task, it is skipped.
497 *
498 * If the task is deactivated before we're done, and we don't
499 * have to wait for it (must_wait is FALSE), just bail out.
500 */
501 cur_thread = current_thread()(active_threads[(0)]);
502
503 list = &task->thread_list;
504 prev_thread = THREAD_NULL((thread_t) 0);
505 task_lock(task);
506 queue_iterate(list, thread, thread_t, thread_list)for ((thread) = (thread_t) ((list)->next); !(((list)) == (
(queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread
)->thread_list)->next))
{
507 if (!(task->active) && !(must_wait)) {
508 ret = KERN_FAILURE5;
509 break;
510 }
511 if (thread != cur_thread) {
512 thread_reference(thread);
513 task_unlock(task)((void)(&(task)->lock));
514 if (prev_thread != THREAD_NULL((thread_t) 0))
515 thread_deallocate(prev_thread);
516 /* may block */
517 (void) thread_dowait(thread, TRUE((boolean_t) 1)); /* may block */
518 prev_thread = thread;
519 task_lock(task);
520 }
521 }
522 task_unlock(task)((void)(&(task)->lock));
523 if (prev_thread != THREAD_NULL((thread_t) 0))
524 thread_deallocate(prev_thread); /* may block */
525 return ret;
526}
527
528kern_return_t task_release(
529 task_t task)
530{
531 queue_head_t *list;
532 thread_t thread, next;
533
534 task_lock(task);
535 if (!task->active) {
536 task_unlock(task)((void)(&(task)->lock));
537 return KERN_FAILURE5;
538 }
539
540 task->suspend_count--;
541
542 /*
543 * Iterate through all the threads and release them
544 */
545 list = &task->thread_list;
546 thread = (thread_t) queue_first(list)((list)->next);
547 while (!queue_end(list, (queue_entry_t) thread)((list) == ((queue_entry_t) thread))) {
548 next = (thread_t) queue_next(&thread->thread_list)((&thread->thread_list)->next);
549 thread_release(thread);
550 thread = next;
551 }
552 task_unlock(task)((void)(&(task)->lock));
553 return KERN_SUCCESS0;
554}
555
556kern_return_t task_threads(
557 task_t task,
558 thread_array_t *thread_list,
559 natural_t *count)
560{
561 unsigned int actual; /* this many threads */
562 thread_t thread;
563 thread_t *threads;
564 int i;
565
566 vm_size_t size, size_needed;
567 vm_offset_t addr;
568
569 if (task == TASK_NULL((task_t) 0))
570 return KERN_INVALID_ARGUMENT4;
571
572 size = 0; addr = 0;
573
574 for (;;) {
575 task_lock(task);
576 if (!task->active) {
577 task_unlock(task)((void)(&(task)->lock));
578 return KERN_FAILURE5;
579 }
580
581 actual = task->thread_count;
582
583 /* do we have the memory we need? */
584
585 size_needed = actual * sizeof(mach_port_t);
586 if (size_needed <= size)
587 break;
588
589 /* unlock the task and allocate more memory */
590 task_unlock(task)((void)(&(task)->lock));
591
592 if (size != 0)
593 kfree(addr, size);
594
595 assert(size_needed > 0)({ if (!(size_needed > 0)) Assert("size_needed > 0", "../kern/task.c"
, 595); })
;
596 size = size_needed;
597
598 addr = kalloc(size);
599 if (addr == 0)
600 return KERN_RESOURCE_SHORTAGE6;
601 }
602
603 /* OK, have memory and the task is locked & active */
604
605 threads = (thread_t *) addr;
606
607 for (i = 0, thread = (thread_t) queue_first(&task->thread_list)((&task->thread_list)->next);
608 i < actual;
609 i++, thread = (thread_t) queue_next(&thread->thread_list)((&thread->thread_list)->next)) {
610 /* take ref for convert_thread_to_port */
611 thread_reference(thread);
612 threads[i] = thread;
613 }
614 assert(queue_end(&task->thread_list, (queue_entry_t) thread))({ if (!(((&task->thread_list) == ((queue_entry_t) thread
)))) Assert("queue_end(&task->thread_list, (queue_entry_t) thread)"
, "../kern/task.c", 614); })
;
615
616 /* can unlock task now that we've got the thread refs */
617 task_unlock(task)((void)(&(task)->lock));
618
619 if (actual == 0) {
620 /* no threads, so return null pointer and deallocate memory */
621
622 *thread_list = 0;
623 *count = 0;
624
625 if (size != 0)
626 kfree(addr, size);
627 } else {
628 /* if we allocated too much, must copy */
629
630 if (size_needed < size) {
631 vm_offset_t newaddr;
632
633 newaddr = kalloc(size_needed);
634 if (newaddr == 0) {
635 for (i = 0; i < actual; i++)
636 thread_deallocate(threads[i]);
637 kfree(addr, size);
638 return KERN_RESOURCE_SHORTAGE6;
639 }
640
641 memcpy((void *) newaddr, (void *) addr, size_needed);
642 kfree(addr, size);
643 threads = (thread_t *) newaddr;
644 }
645
646 *thread_list = (mach_port_t *) threads;
647 *count = actual;
648
649 /* do the conversion that Mig should handle */
650
651 for (i = 0; i < actual; i++)
652 ((ipc_port_t *) threads)[i] =
653 convert_thread_to_port(threads[i]);
654 }
655
656 return KERN_SUCCESS0;
657}
658
659kern_return_t task_suspend(
660 task_t task)
661{
662 boolean_t hold;
663
664 if (task == TASK_NULL((task_t) 0))
665 return KERN_INVALID_ARGUMENT4;
666
667 hold = FALSE((boolean_t) 0);
668 task_lock(task);
669 if ((task->user_stop_count)++ == 0)
670 hold = TRUE((boolean_t) 1);
671 task_unlock(task)((void)(&(task)->lock));
672
673 /*
674 * If the stop count was positive, the task is
675 * already stopped and we can exit.
676 */
677 if (!hold) {
678 return KERN_SUCCESS0;
679 }
680
681 /*
682 * Hold all of the threads in the task, and wait for
683 * them to stop. If the current thread is within
684 * this task, hold it separately so that all of the
685 * other threads can stop first.
686 */
687
688 if (task_hold(task) != KERN_SUCCESS0)
689 return KERN_FAILURE5;
690
691 if (task_dowait(task, FALSE((boolean_t) 0)) != KERN_SUCCESS0)
692 return KERN_FAILURE5;
693
694 if (current_task()((active_threads[(0)])->task) == task) {
695 spl_t s;
696
697 thread_hold(current_thread()(active_threads[(0)]));
698 /*
699 * We want to call thread_block on our way out,
700 * to stop running.
701 */
702 s = splsched();
703 ast_on(cpu_number(), AST_BLOCK)({ if ((need_ast[(0)] |= (0x4)) != 0x0) { ; } });
704 (void) splx(s);
705 }
706
707 return KERN_SUCCESS0;
708}
709
710kern_return_t task_resume(
711 task_t task)
712{
713 boolean_t release;
714
715 if (task == TASK_NULL((task_t) 0))
716 return KERN_INVALID_ARGUMENT4;
717
718 release = FALSE((boolean_t) 0);
719 task_lock(task);
720 if (task->user_stop_count > 0) {
721 if (--(task->user_stop_count) == 0)
722 release = TRUE((boolean_t) 1);
723 }
724 else {
725 task_unlock(task)((void)(&(task)->lock));
726 return KERN_FAILURE5;
727 }
728 task_unlock(task)((void)(&(task)->lock));
729
730 /*
731 * Release the task if necessary.
732 */
733 if (release)
734 return task_release(task);
735
736 return KERN_SUCCESS0;
737}
738
739kern_return_t task_info(
740 task_t task,
741 int flavor,
742 task_info_t task_info_out, /* pointer to OUT array */
743 natural_t *task_info_count) /* IN/OUT */
744{
745 vm_map_t map;
746
747 if (task == TASK_NULL((task_t) 0))
748 return KERN_INVALID_ARGUMENT4;
749
750 switch (flavor) {
751 case TASK_BASIC_INFO1:
752 {
753 task_basic_info_t basic_info;
754
755 /* Allow *task_info_count to be two words smaller than
756 the usual amount, because creation_time is a new member
757 that some callers might not know about. */
758
759 if (*task_info_count < TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t)) - 2) {
760 return KERN_INVALID_ARGUMENT4;
761 }
762
763 basic_info = (task_basic_info_t) task_info_out;
764
765 map = (task == kernel_task) ? kernel_map : task->map;
766
767 basic_info->virtual_size = map->size;
768 basic_info->resident_size = pmap_resident_count(map->pmap)((map->pmap)->stats.resident_count)
769 * PAGE_SIZE(1 << 12);
770
771 task_lock(task);
772 basic_info->base_priority = task->priority;
773 basic_info->suspend_count = task->user_stop_count;
774 basic_info->user_time.seconds
775 = task->total_user_time.seconds;
776 basic_info->user_time.microseconds
777 = task->total_user_time.microseconds;
778 basic_info->system_time.seconds
779 = task->total_system_time.seconds;
780 basic_info->system_time.microseconds
781 = task->total_system_time.microseconds;
782 basic_info->creation_time = task->creation_time;
783 task_unlock(task)((void)(&(task)->lock));
784
785 if (*task_info_count > TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t)))
786 *task_info_count = TASK_BASIC_INFO_COUNT(sizeof(task_basic_info_data_t) / sizeof(natural_t));
787 break;
788 }
789
790 case TASK_EVENTS_INFO2:
791 {
792 task_events_info_t event_info;
793
794 if (*task_info_count < TASK_EVENTS_INFO_COUNT(sizeof(task_events_info_data_t) / sizeof(natural_t))) {
795 return KERN_INVALID_ARGUMENT4;
796 }
797
798 event_info = (task_events_info_t) task_info_out;
799
800 task_lock(task);
801 event_info->faults = task->faults;
802 event_info->zero_fills = task->zero_fills;
803 event_info->reactivations = task->reactivations;
804 event_info->pageins = task->pageins;
805 event_info->cow_faults = task->cow_faults;
806 event_info->messages_sent = task->messages_sent;
807 event_info->messages_received = task->messages_received;
808 task_unlock(task)((void)(&(task)->lock));
809
810 *task_info_count = TASK_EVENTS_INFO_COUNT(sizeof(task_events_info_data_t) / sizeof(natural_t));
811 break;
812 }
813
814 case TASK_THREAD_TIMES_INFO3:
815 {
816 task_thread_times_info_t times_info;
817 thread_t thread;
818
819 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT(sizeof(task_thread_times_info_data_t) / sizeof(natural_t))) {
820 return KERN_INVALID_ARGUMENT4;
821 }
822
823 times_info = (task_thread_times_info_t) task_info_out;
824 times_info->user_time.seconds = 0;
825 times_info->user_time.microseconds = 0;
826 times_info->system_time.seconds = 0;
827 times_info->system_time.microseconds = 0;
828
829 task_lock(task);
830 queue_iterate(&task->thread_list, thread,for ((thread) = (thread_t) ((&task->thread_list)->next
); !(((&task->thread_list)) == ((queue_entry_t)(thread
))); (thread) = (thread_t) ((&(thread)->thread_list)->
next))
831 thread_t, thread_list)for ((thread) = (thread_t) ((&task->thread_list)->next
); !(((&task->thread_list)) == ((queue_entry_t)(thread
))); (thread) = (thread_t) ((&(thread)->thread_list)->
next))
832 {
833 time_value_t user_time, system_time;
834 spl_t s;
835
836 s = splsched();
837 thread_lock(thread);
838
839 thread_read_times(thread, &user_time, &system_time);
840
841 thread_unlock(thread)((void)(&(thread)->lock));
842 splx(s);
843
844 time_value_add(&times_info->user_time, &user_time){ (&times_info->user_time)->microseconds += (&user_time
)->microseconds; (&times_info->user_time)->seconds
+= (&user_time)->seconds; if ((&times_info->user_time
)->microseconds >= (1000000)) { (&times_info->user_time
)->microseconds -= (1000000); (&times_info->user_time
)->seconds++; } }
;
845 time_value_add(&times_info->system_time, &system_time){ (&times_info->system_time)->microseconds += (&
system_time)->microseconds; (&times_info->system_time
)->seconds += (&system_time)->seconds; if ((&times_info
->system_time)->microseconds >= (1000000)) { (&times_info
->system_time)->microseconds -= (1000000); (&times_info
->system_time)->seconds++; } }
;
846 }
847 task_unlock(task)((void)(&(task)->lock));
848
849 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT(sizeof(task_thread_times_info_data_t) / sizeof(natural_t));
850 break;
851 }
852
853 default:
854 return KERN_INVALID_ARGUMENT4;
855 }
856
857 return KERN_SUCCESS0;
858}
859
860#if MACH_HOST0
861/*
862 * task_assign:
863 *
864 * Change the assigned processor set for the task
865 */
866kern_return_t
867task_assign(
868 task_t task,
869 processor_set_t new_pset,
870 boolean_t assign_threads)
871{
872 kern_return_t ret = KERN_SUCCESS0;
873 thread_t thread, prev_thread;
874 queue_head_t *list;
875 processor_set_t pset;
876
877 if (task == TASK_NULL((task_t) 0) || new_pset == PROCESSOR_SET_NULL((processor_set_t) 0)) {
878 return KERN_INVALID_ARGUMENT4;
879 }
880
881 /*
882 * Freeze task`s assignment. Prelude to assigning
883 * task. Only one freeze may be held per task.
884 */
885
886 task_lock(task);
887 while (task->may_assign == FALSE((boolean_t) 0)) {
888 task->assign_active = TRUE((boolean_t) 1);
889 assert_wait((event_t)&task->assign_active, TRUE((boolean_t) 1));
890 task_unlock(task)((void)(&(task)->lock));
891 thread_block((void (*)()) 0);
892 task_lock(task);
893 }
894
895 /*
896 * Avoid work if task already in this processor set.
897 */
898 if (task->processor_set == new_pset) {
899 /*
900 * No need for task->assign_active wakeup:
901 * task->may_assign is still TRUE.
902 */
903 task_unlock(task)((void)(&(task)->lock));
904 return KERN_SUCCESS0;
905 }
906
907 task->may_assign = FALSE((boolean_t) 0);
908 task_unlock(task)((void)(&(task)->lock));
909
910 /*
911 * Safe to get the task`s pset: it cannot change while
912 * task is frozen.
913 */
914 pset = task->processor_set;
915
916 /*
917 * Lock both psets now. Use ordering to avoid deadlock.
918 */
919 Restart:
920 if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
921 pset_lock(pset);
922 pset_lock(new_pset);
923 }
924 else {
925 pset_lock(new_pset);
926 pset_lock(pset);
927 }
928
929 /*
930 * Check if new_pset is ok to assign to. If not,
931 * reassign to default_pset.
932 */
933 if (!new_pset->active) {
934 pset_unlock(pset)((void)(&(pset)->lock));
935 pset_unlock(new_pset)((void)(&(new_pset)->lock));
936 new_pset = &default_pset;
937 goto Restart;
938 }
939
940 pset_reference(new_pset);
941
942 /*
943 * Now grab the task lock and move the task.
944 */
945
946 task_lock(task);
947 pset_remove_task(pset, task);
948 pset_add_task(new_pset, task);
949
950 pset_unlock(pset)((void)(&(pset)->lock));
951 pset_unlock(new_pset)((void)(&(new_pset)->lock));
952
953 if (assign_threads == FALSE((boolean_t) 0)) {
954 /*
955 * We leave existing threads at their
956 * old assignments. Unfreeze task`s
957 * assignment.
958 */
959 task->may_assign = TRUE((boolean_t) 1);
960 if (task->assign_active) {
961 task->assign_active = FALSE((boolean_t) 0);
962 thread_wakeup((event_t) &task->assign_active)thread_wakeup_prim(((event_t) &task->assign_active), (
(boolean_t) 0), 0)
;
963 }
964 task_unlock(task)((void)(&(task)->lock));
965 pset_deallocate(pset);
966 return KERN_SUCCESS0;
967 }
968
969 /*
970 * If current thread is in task, freeze its assignment.
971 */
972 if (current_thread()(active_threads[(0)])->task == task) {
973 task_unlock(task)((void)(&(task)->lock));
974 thread_freeze(current_thread()(active_threads[(0)]));
975 task_lock(task);
976 }
977
978 /*
979 * Iterate down the thread list reassigning all the threads.
980 * New threads pick up task's new processor set automatically.
981 * Do current thread last because new pset may be empty.
982 */
983 list = &task->thread_list;
984 prev_thread = THREAD_NULL((thread_t) 0);
985 queue_iterate(list, thread, thread_t, thread_list)for ((thread) = (thread_t) ((list)->next); !(((list)) == (
(queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread
)->thread_list)->next))
{
986 if (!(task->active)) {
987 ret = KERN_FAILURE5;
988 break;
989 }
990 if (thread != current_thread()(active_threads[(0)])) {
991 thread_reference(thread);
992 task_unlock(task)((void)(&(task)->lock));
993 if (prev_thread != THREAD_NULL((thread_t) 0))
994 thread_deallocate(prev_thread); /* may block */
995 thread_assign(thread,new_pset); /* may block */
996 prev_thread = thread;
997 task_lock(task);
998 }
999 }
1000
1001 /*
1002 * Done, wakeup anyone waiting for us.
1003 */
1004 task->may_assign = TRUE((boolean_t) 1);
1005 if (task->assign_active) {
1006 task->assign_active = FALSE((boolean_t) 0);
1007 thread_wakeup((event_t)&task->assign_active)thread_wakeup_prim(((event_t)&task->assign_active), ((
boolean_t) 0), 0)
;
1008 }
1009 task_unlock(task)((void)(&(task)->lock));
1010 if (prev_thread != THREAD_NULL((thread_t) 0))
1011 thread_deallocate(prev_thread); /* may block */
1012
1013 /*
1014 * Finish assignment of current thread.
1015 */
1016 if (current_thread()(active_threads[(0)])->task == task)
1017 thread_doassign(current_thread()(active_threads[(0)]), new_pset, TRUE((boolean_t) 1));
1018
1019 pset_deallocate(pset);
1020
1021 return ret;
1022}
1023#else /* MACH_HOST */
1024/*
1025 * task_assign:
1026 *
1027 * Change the assigned processor set for the task
1028 */
1029kern_return_t
1030task_assign(
1031 task_t task,
1032 processor_set_t new_pset,
1033 boolean_t assign_threads)
1034{
1035 return KERN_FAILURE5;
1036}
1037#endif /* MACH_HOST */
1038
1039
1040/*
1041 * task_assign_default:
1042 *
1043 * Version of task_assign to assign to default processor set.
1044 */
1045kern_return_t
1046task_assign_default(
1047 task_t task,
1048 boolean_t assign_threads)
1049{
1050 return task_assign(task, &default_pset, assign_threads);
1051}
1052
1053/*
1054 * task_get_assignment
1055 *
1056 * Return name of processor set that task is assigned to.
1057 */
1058kern_return_t task_get_assignment(
1059 task_t task,
1060 processor_set_t *pset)
1061{
1062 if (!task->active)
1063 return KERN_FAILURE5;
1064
1065 *pset = task->processor_set;
1066 pset_reference(*pset);
1067 return KERN_SUCCESS0;
1068}
1069
1070/*
1071 * task_priority
1072 *
1073 * Set priority of task; used only for newly created threads.
1074 * Optionally change priorities of threads.
1075 */
1076kern_return_t
1077task_priority(
1078 task_t task,
1079 int priority,
1080 boolean_t change_threads)
1081{
1082 kern_return_t ret = KERN_SUCCESS0;
1083
1084 if (task == TASK_NULL((task_t) 0) || invalid_pri(priority)(((priority) < 0) || ((priority) >= 50)))
1085 return KERN_INVALID_ARGUMENT4;
1086
1087 task_lock(task);
1088 task->priority = priority;
1089
1090 if (change_threads) {
1091 thread_t thread;
1092 queue_head_t *list;
1093
1094 list = &task->thread_list;
1095 queue_iterate(list, thread, thread_t, thread_list)for ((thread) = (thread_t) ((list)->next); !(((list)) == (
(queue_entry_t)(thread))); (thread) = (thread_t) ((&(thread
)->thread_list)->next))
{
1096 if (thread_priority(thread, priority, FALSE((boolean_t) 0))
1097 != KERN_SUCCESS0)
1098 ret = KERN_FAILURE5;
1099 }
1100 }
1101
1102 task_unlock(task)((void)(&(task)->lock));
1103 return ret;
1104}
1105
1106/*
1107 * task_set_name
1108 *
1109 * Set the name of task TASK to NAME. This is a debugging aid.
1110 * NAME will be used in error messages printed by the kernel.
1111 */
1112kern_return_t
1113task_set_name(
1114 task_t task,
1115 kernel_debug_name_t name)
1116{
1117 strncpy(task->name, name, sizeof task->name - 1);
1118 task->name[sizeof task->name - 1] = '\0';
1119 return KERN_SUCCESS0;
1120}
1121
1122/*
1123 * task_collect_scan:
1124 *
1125 * Attempt to free resources owned by tasks.
1126 */
1127
1128void task_collect_scan(void)
1129{
1130 task_t task, prev_task;
1131 processor_set_t pset, prev_pset;
1132
1133 prev_task = TASK_NULL((task_t) 0);
1134 prev_pset = PROCESSOR_SET_NULL((processor_set_t) 0);
1135
1136 simple_lock(&all_psets_lock);
1137 queue_iterate(&all_psets, pset, processor_set_t, all_psets)for ((pset) = (processor_set_t) ((&all_psets)->next); !
(((&all_psets)) == ((queue_entry_t)(pset))); (pset) = (processor_set_t
) ((&(pset)->all_psets)->next))
{
1138 pset_lock(pset);
1139 queue_iterate(&pset->tasks, task, task_t, pset_tasks)for ((task) = (task_t) ((&pset->tasks)->next); !(((
&pset->tasks)) == ((queue_entry_t)(task))); (task) = (
task_t) ((&(task)->pset_tasks)->next))
{
[5] Within the expansion of the macro 'queue_iterate':
    [a] Value assigned to 'task'
1140 task_reference(task);
[6] Calling 'task_reference'
[9] Returning from 'task_reference'
1141 pset_reference(pset);
1142 pset_unlock(pset)((void)(&(pset)->lock));
1143 simple_unlock(&all_psets_lock)((void)(&all_psets_lock));
1144
1145 machine_task_collect (task);
1146 pmap_collect(task->map->pmap);
[10] Access to field 'map' results in a dereference of a null pointer (loaded from variable 'task')
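/*
 * Note: the null constraint on 'task' at this point comes solely from the
 * inlined TASK_NULL check in task_reference() (events 6 and 9 above, with
 * the check itself shown at events 7-8 in task_reference() earlier in the
 * listing); 'task' was taken from the pset->tasks queue by queue_iterate()
 * and is not otherwise compared against TASK_NULL in this loop.
 */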
1147
1148 if (prev_task != TASK_NULL((task_t) 0))
1149 task_deallocate(prev_task);
1150 prev_task = task;
1151
1152 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
1153 pset_deallocate(prev_pset);
1154 prev_pset = pset;
1155
1156 simple_lock(&all_psets_lock);
1157 pset_lock(pset);
1158 }
1159 pset_unlock(pset)((void)(&(pset)->lock));
1160 }
1161 simple_unlock(&all_psets_lock)((void)(&all_psets_lock));
1162
1163 if (prev_task != TASK_NULL((task_t) 0))
1164 task_deallocate(prev_task);
1165 if (prev_pset != PROCESSOR_SET_NULL((processor_set_t) 0))
1166 pset_deallocate(prev_pset);
1167}
1168
1169boolean_t task_collect_allowed = TRUE((boolean_t) 1);
1170unsigned task_collect_last_tick = 0;
1171unsigned task_collect_max_rate = 0; /* in ticks */
1172
1173/*
1174 * consider_task_collect:
1175 *
1176 * Called by the pageout daemon when the system needs more free pages.
1177 */
1178
1179void consider_task_collect(void)
1180{
1181 /*
1182 * By default, don't attempt task collection more frequently
1183 * than once a second.
1184 */
1185
1186 if (task_collect_max_rate == 0)
[1] Assuming 'task_collect_max_rate' is not equal to 0
[2] Taking false branch
1187 task_collect_max_rate = hz;
1188
1189 if (task_collect_allowed &&
[3] Taking true branch
1190 (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
1191 task_collect_last_tick = sched_tick;
1192 task_collect_scan();
[4] Calling 'task_collect_scan'
1193 }
1194}
1195
1196kern_return_t
1197task_ras_control(
1198 task_t task,
1199 vm_offset_t pc,
1200 vm_offset_t endpc,
1201 int flavor)
1202{
1203 kern_return_t ret = KERN_FAILURE5;
1204
1205#if FAST_TAS0
1206 int i;
1207
1208 ret = KERN_SUCCESS0;
1209 task_lock(task);
1210 switch (flavor) {
1211 case TASK_RAS_CONTROL_PURGE_ALL0: /* remove all RAS */
1212 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1213 task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1214 }
1215 break;
1216 case TASK_RAS_CONTROL_PURGE_ONE1: /* remove this RAS, collapse remaining */
1217 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1218 if ( (task->fast_tas_base[i] == pc)
1219 && (task->fast_tas_end[i] == endpc)) {
1220 while (i < TASK_FAST_TAS_NRAS-1) {
1221 task->fast_tas_base[i] = task->fast_tas_base[i+1];
1222 task->fast_tas_end[i] = task->fast_tas_end[i+1];
1223 i++;
1224 }
1225 task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0;
1226 task->fast_tas_end[TASK_FAST_TAS_NRAS-1] = 0;
1227 break;
1228 }
1229 }
1230 if (i == TASK_FAST_TAS_NRAS) {
1231 ret = KERN_INVALID_ADDRESS1;
1232 }
1233 break;
1234 case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE2:
1235 /* remove all RAS an install this RAS */
1236 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1237 task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1238 }
1239 /* FALL THROUGH */
1240 case TASK_RAS_CONTROL_INSTALL_ONE3: /* install this RAS */
1241 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1242 if ( (task->fast_tas_base[i] == pc)
1243 && (task->fast_tas_end[i] == endpc)) {
1244 /* already installed */
1245 break;
1246 }
1247 if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)){
1248 task->fast_tas_base[i] = pc;
1249 task->fast_tas_end[i] = endpc;
1250 break;
1251 }
1252 }
1253 if (i == TASK_FAST_TAS_NRAS) {
1254 ret = KERN_RESOURCE_SHORTAGE6;
1255 }
1256 break;
1257 default: ret = KERN_INVALID_VALUE18;
1258 break;
1259 }
1260 task_unlock(task)((void)(&(task)->lock));
1261#endif /* FAST_TAS */
1262 return ret;
1263}
1264
1265/*
1266 * register_new_task_notification
1267 *
1268 * Register a port to which a notification about newly created
1269 * tasks are sent.
1270 */
1271kern_return_t
1272register_new_task_notification(
1273 const host_t host,
1274 ipc_port_t notification)
1275{
1276 if (host == HOST_NULL((host_t)0))
1277 return KERN_INVALID_HOST22;
1278
1279 if (new_task_notification != NULL((void *) 0))
1280 return KERN_NO_ACCESS8;
1281
1282 new_task_notification = notification;
1283 return KERN_SUCCESS0;
1284}