File:        obj-scan-build/../kern/task.c
Location:    line 1115, column 17
Description: Access to field 'map' results in a dereference of a null pointer (loaded from variable 'task')
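
Triage note (editor's reading of the report, not part of the analyzer output): the flagged dereference is `pmap_collect(task->map->pmap)` at line 1115 in task_collect_scan(). On the reported path the analyzer inlines task_reference() (lines 214-223), whose `task == TASK_NULL` check teaches it that `task` may be null; continuing from that branch makes the later `task->map` access a null dereference. Since `task` is produced by queue_iterate() over `pset->tasks` and an empty queue never enters the loop body, `task` cannot actually be TASK_NULL there, so this looks like a false positive of the "null check inside a callee" kind. A minimal standalone reproduction of the pattern (hypothetical names, not code from task.c) should behave the same way under `clang --analyze`:

    #include <stddef.h>

    struct map  { int pmap; };
    struct task { struct map *map; };

    /* Like task_reference(): tolerates a NULL argument, which is what
       leads the analyzer to assume the caller's pointer may be NULL. */
    static void ref(struct task *t)
    {
            if (t == NULL)
                    return;
            /* ... take a reference ... */
    }

    void scan(struct task *t)
    {
            ref(t);                 /* state splits: "t may be NULL" */
            (void) t->map->pmap;    /* analyzer: null dereference here */
    }

An explicit guard or assertion before the dereference would document the invariant and suppress the report; see the note after task_collect_scan() below.
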
   1 | /*
   2 |  * Mach Operating System
   3 |  * Copyright (c) 1993-1988 Carnegie Mellon University
   4 |  * All Rights Reserved.
   5 |  *
   6 |  * Permission to use, copy, modify and distribute this software and its
   7 |  * documentation is hereby granted, provided that both the copyright
   8 |  * notice and this permission notice appear in all copies of the
   9 |  * software, derivative works or modified versions, and any portions
  10 |  * thereof, and that both notices appear in supporting documentation.
  11 |  *
  12 |  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  13 |  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  14 |  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  15 |  *
  16 |  * Carnegie Mellon requests users of this software to return to
  17 |  *
  18 |  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  19 |  *  School of Computer Science
  20 |  *  Carnegie Mellon University
  21 |  *  Pittsburgh PA 15213-3890
  22 |  *
  23 |  * any improvements or extensions that they make and grant Carnegie Mellon
  24 |  * the rights to redistribute these changes.
  25 |  */
  26 | /*
  27 |  *  File:    kern/task.c
  28 |  *  Author:  Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
  29 |  *           David Black
  30 |  *
  31 |  *  Task management primitives implementation.
  32 |  */
  33 |
  34 | #include <string.h>
  35 |
  36 | #include <mach/machine/vm_types.h>
  37 | #include <mach/vm_param.h>
  38 | #include <mach/task_info.h>
  39 | #include <mach/task_special_ports.h>
  40 | #include <mach_debug/mach_debug_types.h>
  41 | #include <ipc/ipc_space.h>
  42 | #include <ipc/ipc_types.h>
  43 | #include <kern/debug.h>
  44 | #include <kern/task.h>
  45 | #include <kern/thread.h>
  46 | #include <kern/slab.h>
  47 | #include <kern/kalloc.h>
  48 | #include <kern/processor.h>
  49 | #include <kern/printf.h>
  50 | #include <kern/sched_prim.h>    /* for thread_wakeup */
  51 | #include <kern/ipc_tt.h>
  52 | #include <kern/syscall_emulation.h>
  53 | #include <vm/vm_kern.h>         /* for kernel_map, ipc_kernel_map */
  54 | #include <machine/machspl.h>    /* for splsched */
  55 |
  56 | task_t  kernel_task = TASK_NULL;
  57 | struct kmem_cache task_cache;
  58 |
  59 | void task_init(void)
  60 | {
  61 |         kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
  62 |                         NULL, NULL, NULL, 0);
  63 |
  64 |         eml_init();
  65 |         machine_task_module_init ();
  66 |
  67 |         /*
  68 |          * Create the kernel task as the first task.
  69 |          * Task_create must assign to kernel_task as a side effect,
  70 |          * for other initialization. (:-()
  71 |          */
  72 |         (void) task_create(TASK_NULL, FALSE, &kernel_task);
  73 |         (void) task_set_name(kernel_task, "gnumach");
  74 | }
  75 |
  76 | kern_return_t task_create(
  77 |         task_t          parent_task,
  78 |         boolean_t       inherit_memory,
  79 |         task_t          *child_task)            /* OUT */
  80 | {
  81 |         task_t          new_task;
  82 |         processor_set_t pset;
  83 | #if FAST_TAS
  84 |         int i;
  85 | #endif
  86 |
  87 |         new_task = (task_t) kmem_cache_alloc(&task_cache);
  88 |         if (new_task == TASK_NULL) {
  89 |                 panic("task_create: no memory for task structure");
  90 |         }
  91 |
  92 |         /* one ref for just being alive; one for our caller */
  93 |         new_task->ref_count = 2;
  94 |
  95 |         if (child_task == &kernel_task) {
  96 |                 new_task->map = kernel_map;
  97 |         } else if (inherit_memory) {
  98 |                 new_task->map = vm_map_fork(parent_task->map);
  99 |         } else {
 100 |                 new_task->map = vm_map_create(pmap_create(0),
 101 |                                               round_page(VM_MIN_ADDRESS),
 102 |                                               trunc_page(VM_MAX_ADDRESS), TRUE);
 103 |         }
 104 |
 105 |         simple_lock_init(&new_task->lock);
 106 |         queue_init(&new_task->thread_list);
 107 |         new_task->suspend_count = 0;
 108 |         new_task->active = TRUE;
 109 |         new_task->user_stop_count = 0;
 110 |         new_task->thread_count = 0;
 111 |         new_task->faults = 0;
 112 |         new_task->zero_fills = 0;
 113 |         new_task->reactivations = 0;
 114 |         new_task->pageins = 0;
 115 |         new_task->cow_faults = 0;
 116 |         new_task->messages_sent = 0;
 117 |         new_task->messages_received = 0;
 118 |
 119 |         eml_task_reference(new_task, parent_task);
 120 |
 121 |         ipc_task_init(new_task, parent_task);
 122 |         machine_task_init (new_task);
 123 |
 124 |         new_task->total_user_time.seconds = 0;
 125 |         new_task->total_user_time.microseconds = 0;
 126 |         new_task->total_system_time.seconds = 0;
 127 |         new_task->total_system_time.microseconds = 0;
 128 |
 129 |         record_time_stamp (&new_task->creation_time);
 130 |
 131 |         if (parent_task != TASK_NULL) {
 132 |                 task_lock(parent_task);
 133 |                 pset = parent_task->processor_set;
 134 |                 if (!pset->active)
 135 |                         pset = &default_pset;
 136 |                 pset_reference(pset);
 137 |                 new_task->priority = parent_task->priority;
 138 |                 task_unlock(parent_task);
 139 |         }
 140 |         else {
 141 |                 pset = &default_pset;
 142 |                 pset_reference(pset);
 143 |                 new_task->priority = BASEPRI_USER;
 144 |         }
 145 |         pset_lock(pset);
 146 |         pset_add_task(pset, new_task);
 147 |         pset_unlock(pset);
 148 |
 149 |         new_task->may_assign = TRUE;
 150 |         new_task->assign_active = FALSE;
 151 |
 152 | #if MACH_PCSAMPLE
 153 |         new_task->pc_sample.buffer = 0;
 154 |         new_task->pc_sample.seqno = 0;
 155 |         new_task->pc_sample.sampletypes = 0;
 156 | #endif  /* MACH_PCSAMPLE */
 157 |
 158 | #if FAST_TAS
 159 |         for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
 160 |                 if (inherit_memory) {
 161 |                         new_task->fast_tas_base[i] = parent_task->fast_tas_base[i];
 162 |                         new_task->fast_tas_end[i]  = parent_task->fast_tas_end[i];
 163 |                 } else {
 164 |                         new_task->fast_tas_base[i] = (vm_offset_t)0;
 165 |                         new_task->fast_tas_end[i]  = (vm_offset_t)0;
 166 |                 }
 167 |         }
 168 | #endif  /* FAST_TAS */
 169 |
 170 |         snprintf (new_task->name, sizeof new_task->name, "%p", new_task);
 171 |
 172 |         ipc_task_enable(new_task);
 173 |
 174 |         *child_task = new_task;
 175 |         return KERN_SUCCESS;
 176 | }
 177 |
 178 | /*
 179 |  * task_deallocate:
 180 |  *
 181 |  * Give up a reference to the specified task and destroy it if there
 182 |  * are no other references left.  It is assumed that the current thread
 183 |  * is never in this task.
 184 |  */
 185 | void task_deallocate(
 186 |         task_t  task)
 187 | {
 188 |         int             c;
 189 |         processor_set_t pset;
 190 |
 191 |         if (task == TASK_NULL)
 192 |                 return;
 193 |
 194 |         task_lock(task);
 195 |         c = --(task->ref_count);
 196 |         task_unlock(task);
 197 |         if (c != 0)
 198 |                 return;
 199 |
 200 |         machine_task_terminate (task);
 201 |
 202 |         eml_task_deallocate(task);
 203 |
 204 |         pset = task->processor_set;
 205 |         pset_lock(pset);
 206 |         pset_remove_task(pset, task);
 207 |         pset_unlock(pset);
 208 |         pset_deallocate(pset);
 209 |         vm_map_deallocate(task->map);
 210 |         is_release(task->itk_space);
 211 |         kmem_cache_free(&task_cache, (vm_offset_t) task);
 212 | }
 213 |
 214 | void task_reference(
 215 |         task_t  task)
 216 | {
 217 |         if (task == TASK_NULL)
 218 |                 return;
 219 |
 220 |         task_lock(task);
 221 |         task->ref_count++;
 222 |         task_unlock(task);
 223 | }
 224 |
 225 | /*
 226 |  * task_terminate:
 227 |  *
 228 |  * Terminate the specified task.  See comments on thread_terminate
 229 |  * (kern/thread.c) about problems with terminating the "current task."
 230 |  */
 231 | kern_return_t task_terminate(
 232 |         task_t  task)
 233 | {
 234 |         thread_t        thread, cur_thread;
 235 |         queue_head_t    *list;
 236 |         task_t          cur_task;
 237 |         spl_t           s;
 238 |
 239 |         if (task == TASK_NULL)
 240 |                 return KERN_INVALID_ARGUMENT;
 241 |
 242 |         list = &task->thread_list;
 243 |         cur_task = current_task();
 244 |         cur_thread = current_thread();
 245 |
 246 |         /*
 247 |          * Deactivate task so that it can't be terminated again,
 248 |          * and so lengthy operations in progress will abort.
 249 |          *
 250 |          * If the current thread is in this task, remove it from
 251 |          * the task's thread list to keep the thread-termination
 252 |          * loop simple.
 253 |          */
 254 |         if (task == cur_task) {
 255 |                 task_lock(task);
 256 |                 if (!task->active) {
 257 |                         /*
 258 |                          * Task is already being terminated.
 259 |                          */
 260 |                         task_unlock(task);
 261 |                         return KERN_FAILURE;
 262 |                 }
 263 |                 /*
 264 |                  * Make sure current thread is not being terminated.
 265 |                  */
 266 |                 s = splsched();
 267 |                 thread_lock(cur_thread);
 268 |                 if (!cur_thread->active) {
 269 |                         thread_unlock(cur_thread);
 270 |                         (void) splx(s);
 271 |                         task_unlock(task);
 272 |                         thread_terminate(cur_thread);
 273 |                         return KERN_FAILURE;
 274 |                 }
 275 |                 task->active = FALSE;
 276 |                 queue_remove(list, cur_thread, thread_t, thread_list);
 277 |                 thread_unlock(cur_thread);
 278 |                 (void) splx(s);
 279 |                 task_unlock(task);
 280 |
 281 |                 /*
 282 |                  * Shut down this thread's ipc now because it must
 283 |                  * be left alone to terminate the task.
 284 |                  */
 285 |                 ipc_thread_disable(cur_thread);
 286 |                 ipc_thread_terminate(cur_thread);
 287 |         }
 288 |         else {
 289 |                 /*
 290 |                  * Lock both current and victim task to check for
 291 |                  * potential deadlock.
 292 |                  */
 293 |                 if ((vm_offset_t)task < (vm_offset_t)cur_task) {
 294 |                         task_lock(task);
 295 |                         task_lock(cur_task);
 296 |                 }
 297 |                 else {
 298 |                         task_lock(cur_task);
 299 |                         task_lock(task);
 300 |                 }
 301 |                 /*
 302 |                  * Check if current thread or task is being terminated.
 303 |                  */
 304 |                 s = splsched();
 305 |                 thread_lock(cur_thread);
 306 |                 if ((!cur_task->active) || (!cur_thread->active)) {
 307 |                         /*
 308 |                          * Current task or thread is being terminated.
 309 |                          */
 310 |                         thread_unlock(cur_thread);
 311 |                         (void) splx(s);
 312 |                         task_unlock(task);
 313 |                         task_unlock(cur_task);
 314 |                         thread_terminate(cur_thread);
 315 |                         return KERN_FAILURE;
 316 |                 }
 317 |                 thread_unlock(cur_thread);
 318 |                 (void) splx(s);
 319 |                 task_unlock(cur_task);
 320 |
 321 |                 if (!task->active) {
 322 |                         /*
 323 |                          * Task is already being terminated.
 324 |                          */
 325 |                         task_unlock(task);
 326 |                         return KERN_FAILURE;
 327 |                 }
 328 |                 task->active = FALSE;
 329 |                 task_unlock(task);
 330 |         }
 331 |
 332 |         /*
 333 |          * Prevent further execution of the task.  ipc_task_disable
 334 |          * prevents further task operations via the task port.
 335 |          * If this is the current task, the current thread will
 336 |          * be left running.
 337 |          */
 338 |         ipc_task_disable(task);
 339 |         (void) task_hold(task);
 340 |         (void) task_dowait(task, TRUE);         /* may block */
 341 |
 342 |         /*
 343 |          * Terminate each thread in the task.
 344 |          *
 345 |          * The task_port is closed down, so no more thread_create
 346 |          * operations can be done.  Thread_force_terminate closes the
 347 |          * thread port for each thread; when that is done, the
 348 |          * thread will eventually disappear.  Thus the loop will
 349 |          * terminate.  Call thread_force_terminate instead of
 350 |          * thread_terminate to avoid deadlock checks.  Need
 351 |          * to call thread_block() inside loop because some other
 352 |          * thread (e.g., the reaper) may have to run to get rid
 353 |          * of all references to the thread; it won't vanish from
 354 |          * the task's thread list until the last one is gone.
 355 |          */
 356 |         task_lock(task);
 357 |         while (!queue_empty(list)) {
 358 |                 thread = (thread_t) queue_first(list);
 359 |                 thread_reference(thread);
 360 |                 task_unlock(task);
 361 |                 thread_force_terminate(thread);
 362 |                 thread_deallocate(thread);
 363 |                 thread_block((void (*)()) 0);
 364 |                 task_lock(task);
 365 |         }
 366 |         task_unlock(task);
 367 |
 368 |         /*
 369 |          * Shut down IPC.
 370 |          */
 371 |         ipc_task_terminate(task);
 372 |
 373 |
 374 |         /*
 375 |          * Deallocate the task's reference to itself.
 376 |          */
 377 |         task_deallocate(task);
 378 |
 379 |         /*
 380 |          * If the current thread is in this task, it has not yet
 381 |          * been terminated (since it was removed from the task's
 382 |          * thread-list).  Put it back in the thread list (for
 383 |          * completeness), and terminate it.  Since it holds the
 384 |          * last reference to the task, terminating it will deallocate
 385 |          * the task.
 386 |          */
 387 |         if (cur_thread->task == task) {
 388 |                 task_lock(task);
 389 |                 s = splsched();
 390 |                 queue_enter(list, cur_thread, thread_t, thread_list);
 391 |                 (void) splx(s);
 392 |                 task_unlock(task);
 393 |                 (void) thread_terminate(cur_thread);
 394 |         }
 395 |
 396 |         return KERN_SUCCESS;
 397 | }
 398 |
 399 | /*
 400 |  * task_hold:
 401 |  *
 402 |  * Suspend execution of the specified task.
 403 |  * This is a recursive-style suspension of the task, a count of
 404 |  * suspends is maintained.
 405 |  */
 406 | kern_return_t task_hold(
 407 |         task_t  task)
 408 | {
 409 |         queue_head_t    *list;
 410 |         thread_t        thread, cur_thread;
 411 |
 412 |         cur_thread = current_thread();
 413 |
 414 |         task_lock(task);
 415 |         if (!task->active) {
 416 |                 task_unlock(task);
 417 |                 return KERN_FAILURE;
 418 |         }
 419 |
 420 |         task->suspend_count++;
 421 |
 422 |         /*
 423 |          * Iterate through all the threads and hold them.
 424 |          * Do not hold the current thread if it is within the
 425 |          * task.
 426 |          */
 427 |         list = &task->thread_list;
 428 |         queue_iterate(list, thread, thread_t, thread_list) {
 429 |                 if (thread != cur_thread)
 430 |                         thread_hold(thread);
 431 |         }
 432 |         task_unlock(task);
 433 |         return KERN_SUCCESS;
 434 | }
 435 |
 436 | /*
 437 |  * task_dowait:
 438 |  *
 439 |  * Wait until the task has really been suspended (all of the threads
 440 |  * are stopped).  Skip the current thread if it is within the task.
 441 |  *
 442 |  * If task is deactivated while waiting, return a failure code unless
 443 |  * must_wait is true.
 444 |  */
 445 | kern_return_t task_dowait(
 446 |         task_t          task,
 447 |         boolean_t       must_wait)
 448 | {
 449 |         queue_head_t    *list;
 450 |         thread_t        thread, cur_thread, prev_thread;
 451 |         kern_return_t   ret = KERN_SUCCESS;
 452 |
 453 |         /*
 454 |          * Iterate through all the threads.
 455 |          * While waiting for each thread, we gain a reference to it
 456 |          * to prevent it from going away on us.  This guarantees
 457 |          * that the "next" thread in the list will be a valid thread.
 458 |          *
 459 |          * We depend on the fact that if threads are created while
 460 |          * we are looping through the threads, they will be held
 461 |          * automatically.  We don't care about threads that get
 462 |          * deallocated along the way (the reference prevents it
 463 |          * from happening to the thread we are working with).
 464 |          *
 465 |          * If the current thread is in the affected task, it is skipped.
 466 |          *
 467 |          * If the task is deactivated before we're done, and we don't
 468 |          * have to wait for it (must_wait is FALSE), just bail out.
 469 |          */
 470 |         cur_thread = current_thread();
 471 |
 472 |         list = &task->thread_list;
 473 |         prev_thread = THREAD_NULL;
 474 |         task_lock(task);
 475 |         queue_iterate(list, thread, thread_t, thread_list) {
 476 |                 if (!(task->active) && !(must_wait)) {
 477 |                         ret = KERN_FAILURE;
 478 |                         break;
 479 |                 }
 480 |                 if (thread != cur_thread) {
 481 |                         thread_reference(thread);
 482 |                         task_unlock(task);
 483 |                         if (prev_thread != THREAD_NULL)
 484 |                                 thread_deallocate(prev_thread);
 485 |                                                         /* may block */
 486 |                         (void) thread_dowait(thread, TRUE);     /* may block */
 487 |                         prev_thread = thread;
 488 |                         task_lock(task);
 489 |                 }
 490 |         }
 491 |         task_unlock(task);
 492 |         if (prev_thread != THREAD_NULL)
 493 |                 thread_deallocate(prev_thread);         /* may block */
 494 |         return ret;
 495 | }
 496 |
 497 | kern_return_t task_release(
 498 |         task_t  task)
 499 | {
 500 |         queue_head_t    *list;
 501 |         thread_t        thread, next;
 502 |
 503 |         task_lock(task);
 504 |         if (!task->active) {
 505 |                 task_unlock(task);
 506 |                 return KERN_FAILURE;
 507 |         }
 508 |
 509 |         task->suspend_count--;
 510 |
 511 |         /*
 512 |          * Iterate through all the threads and release them
 513 |          */
 514 |         list = &task->thread_list;
 515 |         thread = (thread_t) queue_first(list);
 516 |         while (!queue_end(list, (queue_entry_t) thread)) {
 517 |                 next = (thread_t) queue_next(&thread->thread_list);
 518 |                 thread_release(thread);
 519 |                 thread = next;
 520 |         }
 521 |         task_unlock(task);
 522 |         return KERN_SUCCESS;
 523 | }
 524 |
 525 | kern_return_t task_threads(
 526 |         task_t          task,
 527 |         thread_array_t  *thread_list,
 528 |         natural_t       *count)
 529 | {
 530 |         unsigned int    actual;         /* this many threads */
 531 |         thread_t        thread;
 532 |         thread_t        *threads;
 533 |         int             i;
 534 |
 535 |         vm_size_t size, size_needed;
 536 |         vm_offset_t addr;
 537 |
 538 |         if (task == TASK_NULL)
 539 |                 return KERN_INVALID_ARGUMENT;
 540 |
 541 |         size = 0; addr = 0;
 542 |
 543 |         for (;;) {
 544 |                 task_lock(task);
 545 |                 if (!task->active) {
 546 |                         task_unlock(task);
 547 |                         return KERN_FAILURE;
 548 |                 }
 549 |
 550 |                 actual = task->thread_count;
 551 |
 552 |                 /* do we have the memory we need? */
 553 |
 554 |                 size_needed = actual * sizeof(mach_port_t);
 555 |                 if (size_needed <= size)
 556 |                         break;
 557 |
 558 |                 /* unlock the task and allocate more memory */
 559 |                 task_unlock(task);
 560 |
 561 |                 if (size != 0)
 562 |                         kfree(addr, size);
 563 |
 564 |                 assert(size_needed > 0);
 565 |                 size = size_needed;
 566 |
 567 |                 addr = kalloc(size);
 568 |                 if (addr == 0)
 569 |                         return KERN_RESOURCE_SHORTAGE;
 570 |         }
 571 |
 572 |         /* OK, have memory and the task is locked & active */
 573 |
 574 |         threads = (thread_t *) addr;
 575 |
 576 |         for (i = 0, thread = (thread_t) queue_first(&task->thread_list);
 577 |              i < actual;
 578 |              i++, thread = (thread_t) queue_next(&thread->thread_list)) {
 579 |                 /* take ref for convert_thread_to_port */
 580 |                 thread_reference(thread);
 581 |                 threads[i] = thread;
 582 |         }
 583 |         assert(queue_end(&task->thread_list, (queue_entry_t) thread));
 584 |
 585 |         /* can unlock task now that we've got the thread refs */
 586 |         task_unlock(task);
 587 |
 588 |         if (actual == 0) {
 589 |                 /* no threads, so return null pointer and deallocate memory */
 590 |
 591 |                 *thread_list = 0;
 592 |                 *count = 0;
 593 |
 594 |                 if (size != 0)
 595 |                         kfree(addr, size);
 596 |         } else {
 597 |                 /* if we allocated too much, must copy */
 598 |
 599 |                 if (size_needed < size) {
 600 |                         vm_offset_t newaddr;
 601 |
 602 |                         newaddr = kalloc(size_needed);
 603 |                         if (newaddr == 0) {
 604 |                                 for (i = 0; i < actual; i++)
 605 |                                         thread_deallocate(threads[i]);
 606 |                                 kfree(addr, size);
 607 |                                 return KERN_RESOURCE_SHORTAGE;
 608 |                         }
 609 |
 610 |                         memcpy((void *) newaddr, (void *) addr, size_needed);
 611 |                         kfree(addr, size);
 612 |                         threads = (thread_t *) newaddr;
 613 |                 }
 614 |
 615 |                 *thread_list = (mach_port_t *) threads;
 616 |                 *count = actual;
 617 |
 618 |                 /* do the conversion that Mig should handle */
 619 |
 620 |                 for (i = 0; i < actual; i++)
 621 |                         ((ipc_port_t *) threads)[i] =
 622 |                                 convert_thread_to_port(threads[i]);
 623 |         }
 624 |
 625 |         return KERN_SUCCESS;
 626 | }
 627 |
 628 | kern_return_t task_suspend(
 629 |         task_t  task)
 630 | {
 631 |         boolean_t hold;
 632 |
 633 |         if (task == TASK_NULL)
 634 |                 return KERN_INVALID_ARGUMENT;
 635 |
 636 |         hold = FALSE;
 637 |         task_lock(task);
 638 |         if ((task->user_stop_count)++ == 0)
 639 |                 hold = TRUE;
 640 |         task_unlock(task);
 641 |
 642 |         /*
 643 |          * If the stop count was positive, the task is
 644 |          * already stopped and we can exit.
 645 |          */
 646 |         if (!hold) {
 647 |                 return KERN_SUCCESS;
 648 |         }
 649 |
 650 |         /*
 651 |          * Hold all of the threads in the task, and wait for
 652 |          * them to stop.  If the current thread is within
 653 |          * this task, hold it separately so that all of the
 654 |          * other threads can stop first.
 655 |          */
 656 |
 657 |         if (task_hold(task) != KERN_SUCCESS)
 658 |                 return KERN_FAILURE;
 659 |
 660 |         if (task_dowait(task, FALSE) != KERN_SUCCESS)
 661 |                 return KERN_FAILURE;
 662 |
 663 |         if (current_task() == task) {
 664 |                 spl_t s;
 665 |
 666 |                 thread_hold(current_thread());
 667 |                 /*
 668 |                  * We want to call thread_block on our way out,
 669 |                  * to stop running.
 670 |                  */
 671 |                 s = splsched();
 672 |                 ast_on(cpu_number(), AST_BLOCK);
 673 |                 (void) splx(s);
 674 |         }
 675 |
 676 |         return KERN_SUCCESS;
 677 | }
 678 |
 679 | kern_return_t task_resume(
 680 |         task_t  task)
 681 | {
 682 |         boolean_t release;
 683 |
 684 |         if (task == TASK_NULL)
 685 |                 return KERN_INVALID_ARGUMENT;
 686 |
 687 |         release = FALSE;
 688 |         task_lock(task);
 689 |         if (task->user_stop_count > 0) {
 690 |                 if (--(task->user_stop_count) == 0)
 691 |                         release = TRUE;
 692 |         }
 693 |         else {
 694 |                 task_unlock(task);
 695 |                 return KERN_FAILURE;
 696 |         }
 697 |         task_unlock(task);
 698 |
 699 |         /*
 700 |          * Release the task if necessary.
 701 |          */
 702 |         if (release)
 703 |                 return task_release(task);
 704 |
 705 |         return KERN_SUCCESS;
 706 | }
 707 |
 708 | kern_return_t task_info(
 709 |         task_t          task,
 710 |         int             flavor,
 711 |         task_info_t     task_info_out,          /* pointer to OUT array */
 712 |         natural_t       *task_info_count)       /* IN/OUT */
 713 | {
 714 |         vm_map_t map;
 715 |
 716 |         if (task == TASK_NULL)
 717 |                 return KERN_INVALID_ARGUMENT;
 718 |
 719 |         switch (flavor) {
 720 |             case TASK_BASIC_INFO:
 721 |             {
 722 |                 task_basic_info_t basic_info;
 723 |
 724 |                 /* Allow *task_info_count to be two words smaller than
 725 |                    the usual amount, because creation_time is a new member
 726 |                    that some callers might not know about. */
 727 |
 728 |                 if (*task_info_count < TASK_BASIC_INFO_COUNT - 2) {
 729 |                         return KERN_INVALID_ARGUMENT;
 730 |                 }
 731 |
 732 |                 basic_info = (task_basic_info_t) task_info_out;
 733 |
 734 |                 map = (task == kernel_task) ? kernel_map : task->map;
 735 |
 736 |                 basic_info->virtual_size = map->size;
 737 |                 basic_info->resident_size = pmap_resident_count(map->pmap)
 738 |                         * PAGE_SIZE;
 739 |
 740 |                 task_lock(task);
 741 |                 basic_info->base_priority = task->priority;
 742 |                 basic_info->suspend_count = task->user_stop_count;
 743 |                 basic_info->user_time.seconds
 744 |                         = task->total_user_time.seconds;
 745 |                 basic_info->user_time.microseconds
 746 |                         = task->total_user_time.microseconds;
 747 |                 basic_info->system_time.seconds
 748 |                         = task->total_system_time.seconds;
 749 |                 basic_info->system_time.microseconds
 750 |                         = task->total_system_time.microseconds;
 751 |                 basic_info->creation_time = task->creation_time;
 752 |                 task_unlock(task);
 753 |
 754 |                 if (*task_info_count > TASK_BASIC_INFO_COUNT)
 755 |                         *task_info_count = TASK_BASIC_INFO_COUNT;
 756 |                 break;
 757 |             }
 758 |
 759 |             case TASK_EVENTS_INFO:
 760 |             {
 761 |                 task_events_info_t event_info;
 762 |
 763 |                 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
 764 |                         return KERN_INVALID_ARGUMENT;
 765 |                 }
 766 |
 767 |                 event_info = (task_events_info_t) task_info_out;
 768 |
 769 |                 task_lock(task);
 770 |                 event_info->faults = task->faults;
 771 |                 event_info->zero_fills = task->zero_fills;
 772 |                 event_info->reactivations = task->reactivations;
 773 |                 event_info->pageins = task->pageins;
 774 |                 event_info->cow_faults = task->cow_faults;
 775 |                 event_info->messages_sent = task->messages_sent;
 776 |                 event_info->messages_received = task->messages_received;
 777 |                 task_unlock(task);
 778 |
 779 |                 *task_info_count = TASK_EVENTS_INFO_COUNT;
 780 |                 break;
 781 |             }
 782 |
 783 |             case TASK_THREAD_TIMES_INFO:
 784 |             {
 785 |                 task_thread_times_info_t times_info;
 786 |                 thread_t thread;
 787 |
 788 |                 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
 789 |                         return KERN_INVALID_ARGUMENT;
 790 |                 }
 791 |
 792 |                 times_info = (task_thread_times_info_t) task_info_out;
 793 |                 times_info->user_time.seconds = 0;
 794 |                 times_info->user_time.microseconds = 0;
 795 |                 times_info->system_time.seconds = 0;
 796 |                 times_info->system_time.microseconds = 0;
 797 |
 798 |                 task_lock(task);
 799 |                 queue_iterate(&task->thread_list, thread,
 800 |                               thread_t, thread_list)
 801 |                 {
 802 |                         time_value_t user_time, system_time;
 803 |                         spl_t s;
 804 |
 805 |                         s = splsched();
 806 |                         thread_lock(thread);
 807 |
 808 |                         thread_read_times(thread, &user_time, &system_time);
 809 |
 810 |                         thread_unlock(thread);
 811 |                         splx(s);
 812 |
 813 |                         time_value_add(&times_info->user_time, &user_time);
 814 |                         time_value_add(&times_info->system_time, &system_time);
 815 |                 }
 816 |                 task_unlock(task);
 817 |
 818 |                 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
 819 |                 break;
 820 |             }
 821 |
 822 |             default:
 823 |                 return KERN_INVALID_ARGUMENT;
 824 |         }
 825 |
 826 |         return KERN_SUCCESS;
 827 | }
 828 |
 829 | #if MACH_HOST
 830 | /*
 831 |  * task_assign:
 832 |  *
 833 |  * Change the assigned processor set for the task
 834 |  */
 835 | kern_return_t
 836 | task_assign(
 837 |         task_t          task,
 838 |         processor_set_t new_pset,
 839 |         boolean_t       assign_threads)
 840 | {
 841 |         kern_return_t   ret = KERN_SUCCESS;
 842 |         thread_t        thread, prev_thread;
 843 |         queue_head_t    *list;
 844 |         processor_set_t pset;
 845 |
 846 |         if (task == TASK_NULL || new_pset == PROCESSOR_SET_NULL) {
 847 |                 return KERN_INVALID_ARGUMENT;
 848 |         }
 849 |
 850 |         /*
 851 |          * Freeze task`s assignment.  Prelude to assigning
 852 |          * task.  Only one freeze may be held per task.
 853 |          */
 854 |
 855 |         task_lock(task);
 856 |         while (task->may_assign == FALSE) {
 857 |                 task->assign_active = TRUE;
 858 |                 assert_wait((event_t)&task->assign_active, TRUE);
 859 |                 task_unlock(task);
 860 |                 thread_block((void (*)()) 0);
 861 |                 task_lock(task);
 862 |         }
 863 |
 864 |         /*
 865 |          * Avoid work if task already in this processor set.
 866 |          */
 867 |         if (task->processor_set == new_pset) {
 868 |                 /*
 869 |                  * No need for task->assign_active wakeup:
 870 |                  * task->may_assign is still TRUE.
 871 |                  */
 872 |                 task_unlock(task);
 873 |                 return KERN_SUCCESS;
 874 |         }
 875 |
 876 |         task->may_assign = FALSE;
 877 |         task_unlock(task);
 878 |
 879 |         /*
 880 |          * Safe to get the task`s pset: it cannot change while
 881 |          * task is frozen.
 882 |          */
 883 |         pset = task->processor_set;
 884 |
 885 |         /*
 886 |          * Lock both psets now.  Use ordering to avoid deadlock.
 887 |          */
 888 |     Restart:
 889 |         if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
 890 |                 pset_lock(pset);
 891 |                 pset_lock(new_pset);
 892 |         }
 893 |         else {
 894 |                 pset_lock(new_pset);
 895 |                 pset_lock(pset);
 896 |         }
 897 |
 898 |         /*
 899 |          * Check if new_pset is ok to assign to.  If not,
 900 |          * reassign to default_pset.
 901 |          */
 902 |         if (!new_pset->active) {
 903 |                 pset_unlock(pset);
 904 |                 pset_unlock(new_pset);
 905 |                 new_pset = &default_pset;
 906 |                 goto Restart;
 907 |         }
 908 |
 909 |         pset_reference(new_pset);
 910 |
 911 |         /*
 912 |          * Now grab the task lock and move the task.
 913 |          */
 914 |
 915 |         task_lock(task);
 916 |         pset_remove_task(pset, task);
 917 |         pset_add_task(new_pset, task);
 918 |
 919 |         pset_unlock(pset);
 920 |         pset_unlock(new_pset);
 921 |
 922 |         if (assign_threads == FALSE) {
 923 |                 /*
 924 |                  * We leave existing threads at their
 925 |                  * old assignments.  Unfreeze task`s
 926 |                  * assignment.
 927 |                  */
 928 |                 task->may_assign = TRUE;
 929 |                 if (task->assign_active) {
 930 |                         task->assign_active = FALSE;
 931 |                         thread_wakeup((event_t) &task->assign_active);
 932 |                 }
 933 |                 task_unlock(task);
 934 |                 pset_deallocate(pset);
 935 |                 return KERN_SUCCESS;
 936 |         }
 937 |
 938 |         /*
 939 |          * If current thread is in task, freeze its assignment.
 940 |          */
 941 |         if (current_thread()->task == task) {
 942 |                 task_unlock(task);
 943 |                 thread_freeze(current_thread());
 944 |                 task_lock(task);
 945 |         }
 946 |
 947 |         /*
 948 |          * Iterate down the thread list reassigning all the threads.
 949 |          * New threads pick up task's new processor set automatically.
 950 |          * Do current thread last because new pset may be empty.
 951 |          */
 952 |         list = &task->thread_list;
 953 |         prev_thread = THREAD_NULL;
 954 |         queue_iterate(list, thread, thread_t, thread_list) {
 955 |                 if (!(task->active)) {
 956 |                         ret = KERN_FAILURE;
 957 |                         break;
 958 |                 }
 959 |                 if (thread != current_thread()) {
 960 |                         thread_reference(thread);
 961 |                         task_unlock(task);
 962 |                         if (prev_thread != THREAD_NULL)
 963 |                                 thread_deallocate(prev_thread); /* may block */
 964 |                         thread_assign(thread, new_pset);        /* may block */
 965 |                         prev_thread = thread;
 966 |                         task_lock(task);
 967 |                 }
 968 |         }
 969 |
 970 |         /*
 971 |          * Done, wakeup anyone waiting for us.
 972 |          */
 973 |         task->may_assign = TRUE;
 974 |         if (task->assign_active) {
 975 |                 task->assign_active = FALSE;
 976 |                 thread_wakeup((event_t)&task->assign_active);
 977 |         }
 978 |         task_unlock(task);
 979 |         if (prev_thread != THREAD_NULL)
 980 |                 thread_deallocate(prev_thread);         /* may block */
 981 |
 982 |         /*
 983 |          * Finish assignment of current thread.
 984 |          */
 985 |         if (current_thread()->task == task)
 986 |                 thread_doassign(current_thread(), new_pset, TRUE);
 987 |
 988 |         pset_deallocate(pset);
 989 |
 990 |         return ret;
 991 | }
 992 | #else   /* MACH_HOST */
 993 | /*
 994 |  * task_assign:
 995 |  *
 996 |  * Change the assigned processor set for the task
 997 |  */
 998 | kern_return_t
 999 | task_assign(
1000 |         task_t          task,
1001 |         processor_set_t new_pset,
1002 |         boolean_t       assign_threads)
1003 | {
1004 |         return KERN_FAILURE;
1005 | }
1006 | #endif  /* MACH_HOST */
1007 |
1008 |
1009 | /*
1010 |  * task_assign_default:
1011 |  *
1012 |  * Version of task_assign to assign to default processor set.
1013 |  */
1014 | kern_return_t
1015 | task_assign_default(
1016 |         task_t          task,
1017 |         boolean_t       assign_threads)
1018 | {
1019 |         return task_assign(task, &default_pset, assign_threads);
1020 | }
1021 |
1022 | /*
1023 |  * task_get_assignment
1024 |  *
1025 |  * Return name of processor set that task is assigned to.
1026 |  */
1027 | kern_return_t task_get_assignment(
1028 |         task_t          task,
1029 |         processor_set_t *pset)
1030 | {
1031 |         if (!task->active)
1032 |                 return KERN_FAILURE;
1033 |
1034 |         *pset = task->processor_set;
1035 |         pset_reference(*pset);
1036 |         return KERN_SUCCESS;
1037 | }
1038 |
1039 | /*
1040 |  * task_priority
1041 |  *
1042 |  * Set priority of task; used only for newly created threads.
1043 |  * Optionally change priorities of threads.
1044 |  */
1045 | kern_return_t
1046 | task_priority(
1047 |         task_t          task,
1048 |         int             priority,
1049 |         boolean_t       change_threads)
1050 | {
1051 |         kern_return_t ret = KERN_SUCCESS;
1052 |
1053 |         if (task == TASK_NULL || invalid_pri(priority))
1054 |                 return KERN_INVALID_ARGUMENT;
1055 |
1056 |         task_lock(task);
1057 |         task->priority = priority;
1058 |
1059 |         if (change_threads) {
1060 |                 thread_t        thread;
1061 |                 queue_head_t    *list;
1062 |
1063 |                 list = &task->thread_list;
1064 |                 queue_iterate(list, thread, thread_t, thread_list) {
1065 |                         if (thread_priority(thread, priority, FALSE)
1066 |                             != KERN_SUCCESS)
1067 |                                 ret = KERN_FAILURE;
1068 |                 }
1069 |         }
1070 |
1071 |         task_unlock(task);
1072 |         return ret;
1073 | }
1074 |
1075 | /*
1076 |  * task_set_name
1077 |  *
1078 |  * Set the name of task TASK to NAME.  This is a debugging aid.
1079 |  * NAME will be used in error messages printed by the kernel.
1080 |  */
1081 | kern_return_t
1082 | task_set_name(
1083 |         task_t                  task,
1084 |         kernel_debug_name_t     name)
1085 | {
1086 |         strncpy(task->name, name, sizeof task->name - 1);
1087 |         task->name[sizeof task->name - 1] = '\0';
1088 |         return KERN_SUCCESS;
1089 | }
1090 |
1091 | /*
1092 |  * task_collect_scan:
1093 |  *
1094 |  * Attempt to free resources owned by tasks.
1095 |  */
1096 |
1097 | void task_collect_scan(void)
1098 | {
1099 |         task_t          task, prev_task;
1100 |         processor_set_t pset, prev_pset;
1101 |
1102 |         prev_task = TASK_NULL;
1103 |         prev_pset = PROCESSOR_SET_NULL;
1104 |
1105 |         simple_lock(&all_psets_lock);
1106 |         queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
1107 |                 pset_lock(pset);
1108 |                 queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
1109 |                         task_reference(task);
1110 |                         pset_reference(pset);
1111 |                         pset_unlock(pset);
1112 |                         simple_unlock(&all_psets_lock);
1113 |
1114 |                         machine_task_collect (task);
1115 |                         pmap_collect(task->map->pmap);
     |                                      ^~~~~~~~~
     |                 Access to field 'map' results in a dereference of a
     |                 null pointer (loaded from variable 'task')
1116 |
1117 |                         if (prev_task != TASK_NULL)
1118 |                                 task_deallocate(prev_task);
1119 |                         prev_task = task;
1120 |
1121 |                         if (prev_pset != PROCESSOR_SET_NULL)
1122 |                                 pset_deallocate(prev_pset);
1123 |                         prev_pset = pset;
1124 |
1125 |                         simple_lock(&all_psets_lock);
1126 |                         pset_lock(pset);
1127 |                 }
1128 |                 pset_unlock(pset);
1129 |         }
1130 |         simple_unlock(&all_psets_lock);
1131 |
1132 |         if (prev_task != TASK_NULL)
1133 |                 task_deallocate(prev_task);
1134 |         if (prev_pset != PROCESSOR_SET_NULL)
1135 |                 pset_deallocate(prev_pset);
1136 | }
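
Editor's note: one low-risk way to address the report above (a sketch, assuming kern/debug.h's assert() is acceptable at this point; this is not an upstream gnumach change) is to assert the invariant before the flagged dereference, since every element of `pset->tasks` is a live task:

    			machine_task_collect (task);
    			assert(task != TASK_NULL);  /* pset task queues never hold null */
    			pmap_collect(task->map->pmap);

The assertion documents why the dereference is safe and, provided the assert-failure path is visibly noreturn, should give the analyzer the constraint it needs to drop the null-pointer path.
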
1137 |
1138 | boolean_t task_collect_allowed = TRUE;
1139 | unsigned task_collect_last_tick = 0;
1140 | unsigned task_collect_max_rate = 0;             /* in ticks */
1141 |
1142 | /*
1143 |  * consider_task_collect:
1144 |  *
1145 |  * Called by the pageout daemon when the system needs more free pages.
1146 |  */
1147 |
1148 | void consider_task_collect(void)
1149 | {
1150 |         /*
1151 |          * By default, don't attempt task collection more frequently
1152 |          * than once a second.
1153 |          */
1154 |
1155 |         if (task_collect_max_rate == 0)
1156 |                 task_collect_max_rate = hz;
1157 |
1158 |         if (task_collect_allowed &&
1159 |             (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
1160 |                 task_collect_last_tick = sched_tick;
1161 |                 task_collect_scan();
1162 |         }
1163 | }
1164 |
1165 | kern_return_t
1166 | task_ras_control(
1167 |         task_t          task,
1168 |         vm_offset_t     pc,
1169 |         vm_offset_t     endpc,
1170 |         int             flavor)
1171 | {
1172 |         kern_return_t ret = KERN_FAILURE;
1173 |
1174 | #if FAST_TAS
1175 |         int i;
1176 |
1177 |         ret = KERN_SUCCESS;
1178 |         task_lock(task);
1179 |         switch (flavor) {
1180 |             case TASK_RAS_CONTROL_PURGE_ALL:    /* remove all RAS */
1181 |                 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1182 |                         task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1183 |                 }
1184 |                 break;
1185 |             case TASK_RAS_CONTROL_PURGE_ONE:    /* remove this RAS, collapse remaining */
1186 |                 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1187 |                         if ( (task->fast_tas_base[i] == pc)
1188 |                           && (task->fast_tas_end[i] == endpc)) {
1189 |                                 while (i < TASK_FAST_TAS_NRAS-1) {
1190 |                                         task->fast_tas_base[i] = task->fast_tas_base[i+1];
1191 |                                         task->fast_tas_end[i]  = task->fast_tas_end[i+1];
1192 |                                         i++;
1193 |                                 }
1194 |                                 task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0;
1195 |                                 task->fast_tas_end[TASK_FAST_TAS_NRAS-1]  = 0;
1196 |                                 break;
1197 |                         }
1198 |                 }
1199 |                 if (i == TASK_FAST_TAS_NRAS) {
1200 |                         ret = KERN_INVALID_ADDRESS;
1201 |                 }
1202 |                 break;
1203 |             case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE:
1204 |                 /* remove all RAS and install this RAS */
1205 |                 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1206 |                         task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1207 |                 }
1208 |                 /* FALL THROUGH */
1209 |             case TASK_RAS_CONTROL_INSTALL_ONE:  /* install this RAS */
1210 |                 for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1211 |                         if ( (task->fast_tas_base[i] == pc)
1212 |                           && (task->fast_tas_end[i] == endpc)) {
1213 |                                 /* already installed */
1214 |                                 break;
1215 |                         }
1216 |                         if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)) {
1217 |                                 task->fast_tas_base[i] = pc;
1218 |                                 task->fast_tas_end[i] = endpc;
1219 |                                 break;
1220 |                         }
1221 |                 }
1222 |                 if (i == TASK_FAST_TAS_NRAS) {
1223 |                         ret = KERN_RESOURCE_SHORTAGE;
1224 |                 }
1225 |                 break;
1226 |             default: ret = KERN_INVALID_VALUE;
1227 |                 break;
1228 |         }
1229 |         task_unlock(task);
1230 | #endif  /* FAST_TAS */
1231 |         return ret;
1232 | }