File:        obj-scan-build/../kern/task.c
Location:    line 580, column 3
Description: Array access (from variable 'threads') results in a null pointer dereference
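How the diagnostic can arise (a best-effort reading of the report; the interactive path notes from the HTML viewer are not preserved here): in task_threads() the sizing loop starts with size == 0 and addr == 0. If actual (task->thread_count) reads as 0, then size_needed == 0 <= size, the loop breaks without ever calling kalloc(), and threads aliases the still-null addr. The copy loop at line 580 would then index a null pointer. On that same path, however, actual == 0 also keeps the copy loop from running at all, so the report is plausibly a false positive unless actual * sizeof(mach_port_t) can wrap to 0 while actual > 0. A distilled, stand-alone sketch of the flagged shape (illustrative user-space code, not from GNU Mach):

	/* sketch.c -- the allocation/copy shape scan-build objects to.
	   All names here are illustrative stand-ins for the kernel code. */
	#include <stdlib.h>

	static void sketch(unsigned actual)
	{
		size_t size = 0;
		void *addr = NULL;
		size_t size_needed = actual * sizeof(void *);

		if (size_needed > size)			/* skipped when size_needed == 0 */
			addr = malloc(size_needed);	/* kalloc() in the kernel */

		void **threads = addr;			/* NULL if the branch was skipped */
		for (unsigned i = 0; i < actual; i++)
			threads[i] = NULL;		/* line-580 analogue: reachable with
						   threads == NULL only if actual > 0
						   while size_needed == 0 */
	}

	int main(void) { sketch(0); return 0; }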
1 | /*
2 |  * Mach Operating System
3 |  * Copyright (c) 1993-1988 Carnegie Mellon University
4 |  * All Rights Reserved.
5 |  *
6 |  * Permission to use, copy, modify and distribute this software and its
7 |  * documentation is hereby granted, provided that both the copyright
8 |  * notice and this permission notice appear in all copies of the
9 |  * software, derivative works or modified versions, and any portions
10 |  * thereof, and that both notices appear in supporting documentation.
11 |  *
12 |  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 |  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 |  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 |  *
16 |  * Carnegie Mellon requests users of this software to return to
17 |  *
18 |  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
19 |  *  School of Computer Science
20 |  *  Carnegie Mellon University
21 |  *  Pittsburgh PA 15213-3890
22 |  *
23 |  * any improvements or extensions that they make and grant Carnegie Mellon
24 |  * the rights to redistribute these changes.
25 |  */
26 | /*
27 |  *	File:	kern/task.c
28 |  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
29 |  *		David Black
30 |  *
31 |  *	Task management primitives implementation.
32 |  */
33 |
34 | #include <string.h>
35 |
36 | #include <mach/machine/vm_types.h>
37 | #include <mach/vm_param.h>
38 | #include <mach/task_info.h>
39 | #include <mach/task_special_ports.h>
40 | #include <mach_debug/mach_debug_types.h>
41 | #include <ipc/ipc_space.h>
42 | #include <ipc/ipc_types.h>
43 | #include <kern/debug.h>
44 | #include <kern/task.h>
45 | #include <kern/thread.h>
46 | #include <kern/slab.h>
47 | #include <kern/kalloc.h>
48 | #include <kern/processor.h>
49 | #include <kern/printf.h>
50 | #include <kern/sched_prim.h>	/* for thread_wakeup */
51 | #include <kern/ipc_tt.h>
52 | #include <kern/syscall_emulation.h>
53 | #include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
54 | #include <machine/machspl.h>	/* for splsched */
55 |
56 | task_t	kernel_task = TASK_NULL;
57 | struct kmem_cache task_cache;
58 |
59 | void task_init(void)
60 | {
61 | 	kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
62 | 			NULL, NULL, NULL, 0);
63 |
64 | 	eml_init();
65 | 	machine_task_module_init ();
66 |
67 | 	/*
68 | 	 * Create the kernel task as the first task.
69 | 	 * Task_create must assign to kernel_task as a side effect,
70 | 	 * for other initialization. (:-()
71 | 	 */
72 | 	(void) task_create(TASK_NULL, FALSE, &kernel_task);
73 | }
74 |
75 | kern_return_t task_create(
76 | 	task_t		parent_task,
77 | 	boolean_t	inherit_memory,
78 | 	task_t		*child_task)	/* OUT */
79 | {
80 | 	task_t		new_task;
81 | 	processor_set_t	pset;
82 | #if FAST_TAS
83 | 	int		i;
84 | #endif
85 |
86 | 	new_task = (task_t) kmem_cache_alloc(&task_cache);
87 | 	if (new_task == TASK_NULL) {
88 | 		panic("task_create: no memory for task structure");
89 | 	}
90 |
91 | 	/* one ref for just being alive; one for our caller */
92 | 	new_task->ref_count = 2;
93 |
94 | 	if (child_task == &kernel_task) {
95 | 		new_task->map = kernel_map;
96 | 	} else if (inherit_memory) {
97 | 		new_task->map = vm_map_fork(parent_task->map);
98 | 	} else {
99 | 		new_task->map = vm_map_create(pmap_create(0),
100 | 				round_page(VM_MIN_ADDRESS),
101 | 				trunc_page(VM_MAX_ADDRESS), TRUE);
102 | 	}
103 |
104 | 	simple_lock_init(&new_task->lock);
105 | 	queue_init(&new_task->thread_list);
106 | 	new_task->suspend_count = 0;
107 | 	new_task->active = TRUE;
108 | 	new_task->user_stop_count = 0;
109 | 	new_task->thread_count = 0;
110 | 	new_task->faults = 0;
111 | 	new_task->zero_fills = 0;
112 | 	new_task->reactivations = 0;
113 | 	new_task->pageins = 0;
114 | 	new_task->cow_faults = 0;
115 | 	new_task->messages_sent = 0;
116 | 	new_task->messages_received = 0;
117 |
118 | 	eml_task_reference(new_task, parent_task);
119 |
120 | 	ipc_task_init(new_task, parent_task);
121 | 	machine_task_init (new_task);
122 |
123 | 	new_task->total_user_time.seconds = 0;
124 | 	new_task->total_user_time.microseconds = 0;
125 | 	new_task->total_system_time.seconds = 0;
126 | 	new_task->total_system_time.microseconds = 0;
127 |
128 | 	record_time_stamp (&new_task->creation_time);
129 |
130 | 	if (parent_task != TASK_NULL) {
131 | 		task_lock(parent_task);
132 | 		pset = parent_task->processor_set;
133 | 		if (!pset->active)
134 | 			pset = &default_pset;
135 | 		pset_reference(pset);
136 | 		new_task->priority = parent_task->priority;
137 | 		task_unlock(parent_task);
138 | 	}
139 | 	else {
140 | 		pset = &default_pset;
141 | 		pset_reference(pset);
142 | 		new_task->priority = BASEPRI_USER;
143 | 	}
144 | 	pset_lock(pset);
145 | 	pset_add_task(pset, new_task);
146 | 	pset_unlock(pset);
147 |
148 | 	new_task->may_assign = TRUE;
149 | 	new_task->assign_active = FALSE;
150 |
151 | #if MACH_PCSAMPLE
152 | 	new_task->pc_sample.buffer = 0;
153 | 	new_task->pc_sample.seqno = 0;
154 | 	new_task->pc_sample.sampletypes = 0;
155 | #endif	/* MACH_PCSAMPLE */
156 |
157 | #if FAST_TAS
158 | 	for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
159 | 		if (inherit_memory) {
160 | 			new_task->fast_tas_base[i] = parent_task->fast_tas_base[i];
161 | 			new_task->fast_tas_end[i] = parent_task->fast_tas_end[i];
162 | 		} else {
163 | 			new_task->fast_tas_base[i] = (vm_offset_t)0;
164 | 			new_task->fast_tas_end[i] = (vm_offset_t)0;
165 | 		}
166 | 	}
167 | #endif	/* FAST_TAS */
168 |
169 | 	snprintf (new_task->name, sizeof new_task->name, "%p", new_task);
170 |
171 | 	ipc_task_enable(new_task);
172 |
173 | 	*child_task = new_task;
174 | 	return KERN_SUCCESS;
175 | }
176 |
177 | /*
178 |  *	task_deallocate:
179 |  *
180 |  *	Give up a reference to the specified task and destroy it if there
181 |  *	are no other references left.  It is assumed that the current thread
182 |  *	is never in this task.
183 |  */
184 | void task_deallocate(
185 | 	task_t	task)
186 | {
187 | 	int		c;
188 | 	processor_set_t	pset;
189 |
190 | 	if (task == TASK_NULL)
191 | 		return;
192 |
193 | 	task_lock(task);
194 | 	c = --(task->ref_count);
195 | 	task_unlock(task);
196 | 	if (c != 0)
197 | 		return;
198 |
199 | 	machine_task_terminate (task);
200 |
201 | 	eml_task_deallocate(task);
202 |
203 | 	pset = task->processor_set;
204 | 	pset_lock(pset);
205 | 	pset_remove_task(pset,task);
206 | 	pset_unlock(pset);
207 | 	pset_deallocate(pset);
208 | 	vm_map_deallocate(task->map);
209 | 	is_release(task->itk_space);
210 | 	kmem_cache_free(&task_cache, (vm_offset_t) task);
211 | }
212 |
213 | void task_reference(
214 | 	task_t	task)
215 | {
216 | 	if (task == TASK_NULL)
217 | 		return;
218 |
219 | 	task_lock(task);
220 | 	task->ref_count++;
221 | 	task_unlock(task);
222 | }
223 |
224 | /*
225 |  *	task_terminate:
226 |  *
227 |  *	Terminate the specified task.  See comments on thread_terminate
228 |  *	(kern/thread.c) about problems with terminating the "current task."
229 |  */
230 | kern_return_t task_terminate(
231 | 	task_t	task)
232 | {
233 | 	thread_t	thread, cur_thread;
234 | 	queue_head_t	*list;
235 | 	task_t		cur_task;
236 | 	spl_t		s;
237 |
238 | 	if (task == TASK_NULL)
239 | 		return KERN_INVALID_ARGUMENT;
240 |
241 | 	list = &task->thread_list;
242 | 	cur_task = current_task();
243 | 	cur_thread = current_thread();
244 |
245 | 	/*
246 | 	 * Deactivate task so that it can't be terminated again,
247 | 	 * and so lengthy operations in progress will abort.
248 | 	 *
249 | 	 * If the current thread is in this task, remove it from
250 | 	 * the task's thread list to keep the thread-termination
251 | 	 * loop simple.
252 | 	 */
253 | 	if (task == cur_task) {
254 | 		task_lock(task);
255 | 		if (!task->active) {
256 | 			/*
257 | 			 * Task is already being terminated.
258 | 			 */
259 | 			task_unlock(task);
260 | 			return KERN_FAILURE;
261 | 		}
262 | 		/*
263 | 		 * Make sure current thread is not being terminated.
264 | 		 */
265 | 		s = splsched();
266 | 		thread_lock(cur_thread);
267 | 		if (!cur_thread->active) {
268 | 			thread_unlock(cur_thread);
269 | 			(void) splx(s);
270 | 			task_unlock(task);
271 | 			thread_terminate(cur_thread);
272 | 			return KERN_FAILURE;
273 | 		}
274 | 		task->active = FALSE;
275 | 		queue_remove(list, cur_thread, thread_t, thread_list);
276 | 		thread_unlock(cur_thread);
277 | 		(void) splx(s);
278 | 		task_unlock(task);
279 |
280 | 		/*
281 | 		 * Shut down this thread's ipc now because it must
282 | 		 * be left alone to terminate the task.
283 | 		 */
284 | 		ipc_thread_disable(cur_thread);
285 | 		ipc_thread_terminate(cur_thread);
286 | 	}
287 | 	else {
288 | 		/*
289 | 		 * Lock both current and victim task to check for
290 | 		 * potential deadlock.
291 | 		 */
292 | 		if ((vm_offset_t)task < (vm_offset_t)cur_task) {
293 | 			task_lock(task);
294 | 			task_lock(cur_task);
295 | 		}
296 | 		else {
297 | 			task_lock(cur_task);
298 | 			task_lock(task);
299 | 		}
300 | 		/*
301 | 		 * Check if current thread or task is being terminated.
302 | 		 */
303 | 		s = splsched();
304 | 		thread_lock(cur_thread);
305 | 		if ((!cur_task->active) || (!cur_thread->active)) {
306 | 			/*
307 | 			 * Current task or thread is being terminated.
308 | 			 */
309 | 			thread_unlock(cur_thread);
310 | 			(void) splx(s);
311 | 			task_unlock(task);
312 | 			task_unlock(cur_task);
313 | 			thread_terminate(cur_thread);
314 | 			return KERN_FAILURE;
315 | 		}
316 | 		thread_unlock(cur_thread);
317 | 		(void) splx(s);
318 | 		task_unlock(cur_task);
319 |
320 | 		if (!task->active) {
321 | 			/*
322 | 			 * Task is already being terminated.
323 | 			 */
324 | 			task_unlock(task);
325 | 			return KERN_FAILURE;
326 | 		}
327 | 		task->active = FALSE;
328 | 		task_unlock(task);
329 | 	}
330 |
331 | 	/*
332 | 	 * Prevent further execution of the task.  ipc_task_disable
333 | 	 * prevents further task operations via the task port.
334 | 	 * If this is the current task, the current thread will
335 | 	 * be left running.
336 | 	 */
337 | 	ipc_task_disable(task);
338 | 	(void) task_hold(task);
339 | 	(void) task_dowait(task,TRUE);		/* may block */
340 |
341 | 	/*
342 | 	 * Terminate each thread in the task.
343 | 	 *
344 | 	 * The task_port is closed down, so no more thread_create
345 | 	 * operations can be done.  Thread_force_terminate closes the
346 | 	 * thread port for each thread; when that is done, the
347 | 	 * thread will eventually disappear.  Thus the loop will
348 | 	 * terminate.  Call thread_force_terminate instead of
349 | 	 * thread_terminate to avoid deadlock checks.  Need
350 | 	 * to call thread_block() inside loop because some other
351 | 	 * thread (e.g., the reaper) may have to run to get rid
352 | 	 * of all references to the thread; it won't vanish from
353 | 	 * the task's thread list until the last one is gone.
354 | 	 */
355 | 	task_lock(task);
356 | 	while (!queue_empty(list)) {
357 | 		thread = (thread_t) queue_first(list);
358 | 		thread_reference(thread);
359 | 		task_unlock(task);
360 | 		thread_force_terminate(thread);
361 | 		thread_deallocate(thread);
362 | 		thread_block((void (*)()) 0);
363 | 		task_lock(task);
364 | 	}
365 | 	task_unlock(task);
366 |
367 | 	/*
368 | 	 * Shut down IPC.
369 | 	 */
370 | 	ipc_task_terminate(task);
371 |
372 |
373 | 	/*
374 | 	 * Deallocate the task's reference to itself.
375 | 	 */
376 | 	task_deallocate(task);
377 |
378 | 	/*
379 | 	 * If the current thread is in this task, it has not yet
380 | 	 * been terminated (since it was removed from the task's
381 | 	 * thread-list).  Put it back in the thread list (for
382 | 	 * completeness), and terminate it.  Since it holds the
383 | 	 * last reference to the task, terminating it will deallocate
384 | 	 * the task.
385 | 	 */
386 | 	if (cur_thread->task == task) {
387 | 		task_lock(task);
388 | 		s = splsched();
389 | 		queue_enter(list, cur_thread, thread_t, thread_list);
390 | 		(void) splx(s);
391 | 		task_unlock(task);
392 | 		(void) thread_terminate(cur_thread);
393 | 	}
394 |
395 | 	return KERN_SUCCESS;
396 | }
397 |
398 | /*
399 |  *	task_hold:
400 |  *
401 |  *	Suspend execution of the specified task.
402 |  *	This is a recursive-style suspension of the task, a count of
403 |  *	suspends is maintained.
404 |  */
405 | kern_return_t task_hold(
406 | 	task_t	task)
407 | {
408 | 	queue_head_t	*list;
409 | 	thread_t	thread, cur_thread;
410 |
411 | 	cur_thread = current_thread();
412 |
413 | 	task_lock(task);
414 | 	if (!task->active) {
415 | 		task_unlock(task);
416 | 		return KERN_FAILURE;
417 | 	}
418 |
419 | 	task->suspend_count++;
420 |
421 | 	/*
422 | 	 * Iterate through all the threads and hold them.
423 | 	 * Do not hold the current thread if it is within the
424 | 	 * task.
425 | 	 */
426 | 	list = &task->thread_list;
427 | 	queue_iterate(list, thread, thread_t, thread_list) {
428 | 		if (thread != cur_thread)
429 | 			thread_hold(thread);
430 | 	}
431 | 	task_unlock(task);
432 | 	return KERN_SUCCESS;
433 | }
434 |
435 | /*
436 |  *	task_dowait:
437 |  *
438 |  *	Wait until the task has really been suspended (all of the threads
439 |  *	are stopped).  Skip the current thread if it is within the task.
440 |  *
441 |  *	If task is deactivated while waiting, return a failure code unless
442 |  *	must_wait is true.
443 |  */
444 | kern_return_t task_dowait(
445 | 	task_t		task,
446 | 	boolean_t	must_wait)
447 | {
448 | 	queue_head_t	*list;
449 | 	thread_t	thread, cur_thread, prev_thread;
450 | 	kern_return_t	ret = KERN_SUCCESS;
451 |
452 | 	/*
453 | 	 * Iterate through all the threads.
454 | 	 * While waiting for each thread, we gain a reference to it
455 | 	 * to prevent it from going away on us.  This guarantees
456 | 	 * that the "next" thread in the list will be a valid thread.
457 | 	 *
458 | 	 * We depend on the fact that if threads are created while
459 | 	 * we are looping through the threads, they will be held
460 | 	 * automatically.  We don't care about threads that get
461 | 	 * deallocated along the way (the reference prevents it
462 | 	 * from happening to the thread we are working with).
463 | 	 *
464 | 	 * If the current thread is in the affected task, it is skipped.
465 | 	 *
466 | 	 * If the task is deactivated before we're done, and we don't
467 | 	 * have to wait for it (must_wait is FALSE), just bail out.
468 | 	 */
469 | 	cur_thread = current_thread();
470 |
471 | 	list = &task->thread_list;
472 | 	prev_thread = THREAD_NULL;
473 | 	task_lock(task);
474 | 	queue_iterate(list, thread, thread_t, thread_list) {
475 | 		if (!(task->active) && !(must_wait)) {
476 | 			ret = KERN_FAILURE;
477 | 			break;
478 | 		}
479 | 		if (thread != cur_thread) {
480 | 			thread_reference(thread);
481 | 			task_unlock(task);
482 | 			if (prev_thread != THREAD_NULL)
483 | 				thread_deallocate(prev_thread);
484 | 						/* may block */
485 | 			(void) thread_dowait(thread, TRUE);	/* may block */
486 | 			prev_thread = thread;
487 | 			task_lock(task);
488 | 		}
489 | 	}
490 | 	task_unlock(task);
491 | 	if (prev_thread != THREAD_NULL)
492 | 		thread_deallocate(prev_thread);		/* may block */
493 | 	return ret;
494 | }
495 |
496 | kern_return_t task_release(
497 | 	task_t	task)
498 | {
499 | 	queue_head_t	*list;
500 | 	thread_t	thread, next;
501 |
502 | 	task_lock(task);
503 | 	if (!task->active) {
504 | 		task_unlock(task);
505 | 		return KERN_FAILURE;
506 | 	}
507 |
508 | 	task->suspend_count--;
509 |
510 | 	/*
511 | 	 * Iterate through all the threads and release them
512 | 	 */
513 | 	list = &task->thread_list;
514 | 	thread = (thread_t) queue_first(list);
515 | 	while (!queue_end(list, (queue_entry_t) thread)) {
516 | 		next = (thread_t) queue_next(&thread->thread_list);
517 | 		thread_release(thread);
518 | 		thread = next;
519 | 	}
520 | 	task_unlock(task);
521 | 	return KERN_SUCCESS;
522 | }
523 |
524 | kern_return_t task_threads(
525 | 	task_t		task,
526 | 	thread_array_t	*thread_list,
527 | 	natural_t	*count)
528 | {
529 | 	unsigned int	actual;	/* this many threads */
530 | 	thread_t	thread;
531 | 	thread_t	*threads;
532 | 	int		i;
533 |
534 | 	vm_size_t	size, size_needed;
535 | 	vm_offset_t	addr;
536 |
537 | 	if (task == TASK_NULL)
538 | 		return KERN_INVALID_ARGUMENT;
539 |
540 | 	size = 0; addr = 0;
541 |
542 | 	for (;;) {
543 | 		task_lock(task);
544 | 		if (!task->active) {
545 | 			task_unlock(task);
546 | 			return KERN_FAILURE;
547 | 		}
548 |
549 | 		actual = task->thread_count;
550 |
551 | 		/* do we have the memory we need? */
552 |
553 | 		size_needed = actual * sizeof(mach_port_t);
554 | 		if (size_needed <= size)
555 | 			break;
556 |
557 | 		/* unlock the task and allocate more memory */
558 | 		task_unlock(task);
559 |
560 | 		if (size != 0)
561 | 			kfree(addr, size);
562 |
563 | 		assert(size_needed > 0);
564 | 		size = size_needed;
565 |
566 | 		addr = kalloc(size);
567 | 		if (addr == 0)
568 | 			return KERN_RESOURCE_SHORTAGE;
569 | 	}
570 |
571 | 	/* OK, have memory and the task is locked & active */
572 |
573 | 	threads = (thread_t *) addr;
574 |
575 | 	for (i = 0, thread = (thread_t) queue_first(&task->thread_list);
576 | 	     i < actual;
577 | 	     i++, thread = (thread_t) queue_next(&thread->thread_list)) {
578 | 		/* take ref for convert_thread_to_port */
579 | 		thread_reference(thread);
580 | 		threads[i] = thread;
    |		^~~~~~~~~~ Array access (from variable 'threads') results in a null pointer dereference
581 | 	}
582 | 	assert(queue_end(&task->thread_list, (queue_entry_t) thread));
583 |
584 | 	/* can unlock task now that we've got the thread refs */
585 | 	task_unlock(task);
586 |
587 | 	if (actual == 0) {
588 | 		/* no threads, so return null pointer and deallocate memory */
589 |
590 | 		*thread_list = 0;
591 | 		*count = 0;
592 |
593 | 		if (size != 0)
594 | 			kfree(addr, size);
595 | 	} else {
596 | 		/* if we allocated too much, must copy */
597 |
598 | 		if (size_needed < size) {
599 | 			vm_offset_t newaddr;
600 |
601 | 			newaddr = kalloc(size_needed);
602 | 			if (newaddr == 0) {
603 | 				for (i = 0; i < actual; i++)
604 | 					thread_deallocate(threads[i]);
605 | 				kfree(addr, size);
606 | 				return KERN_RESOURCE_SHORTAGE;
607 | 			}
608 |
609 | 			memcpy((void *) newaddr, (void *) addr, size_needed);
610 | 			kfree(addr, size);
611 | 			threads = (thread_t *) newaddr;
612 | 		}
613 |
614 | 		*thread_list = (mach_port_t *) threads;
615 | 		*count = actual;
616 |
617 | 		/* do the conversion that Mig should handle */
618 |
619 | 		for (i = 0; i < actual; i++)
620 | 			((ipc_port_t *) threads)[i] =
621 | 				convert_thread_to_port(threads[i]);
622 | 	}
623 |
624 | 	return KERN_SUCCESS;
625 | }
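Since actual > 0 always forces the size_needed > size branch (barring overflow of actual * sizeof(mach_port_t)), one way to quiet the checker is to state that invariant where threads is formed. A minimal, hypothetical sketch, untested against the GNU Mach tree:

	threads = (thread_t *) addr;
	/* hypothetical addition: actual > 0 implies kalloc() ran above */
	assert(actual == 0 || addr != 0);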
626 |
627 | kern_return_t task_suspend(
628 | 	task_t	task)
629 | {
630 | 	boolean_t	hold;
631 |
632 | 	if (task == TASK_NULL)
633 | 		return KERN_INVALID_ARGUMENT;
634 |
635 | 	hold = FALSE;
636 | 	task_lock(task);
637 | 	if ((task->user_stop_count)++ == 0)
638 | 		hold = TRUE;
639 | 	task_unlock(task);
640 |
641 | 	/*
642 | 	 * If the stop count was positive, the task is
643 | 	 * already stopped and we can exit.
644 | 	 */
645 | 	if (!hold) {
646 | 		return KERN_SUCCESS;
647 | 	}
648 |
649 | 	/*
650 | 	 * Hold all of the threads in the task, and wait for
651 | 	 * them to stop.  If the current thread is within
652 | 	 * this task, hold it separately so that all of the
653 | 	 * other threads can stop first.
654 | 	 */
655 |
656 | 	if (task_hold(task) != KERN_SUCCESS)
657 | 		return KERN_FAILURE;
658 |
659 | 	if (task_dowait(task, FALSE) != KERN_SUCCESS)
660 | 		return KERN_FAILURE;
661 |
662 | 	if (current_task() == task) {
663 | 		spl_t	s;
664 |
665 | 		thread_hold(current_thread());
666 | 		/*
667 | 		 * We want to call thread_block on our way out,
668 | 		 * to stop running.
669 | 		 */
670 | 		s = splsched();
671 | 		ast_on(cpu_number(), AST_BLOCK);
672 | 		(void) splx(s);
673 | 	}
674 |
675 | 	return KERN_SUCCESS;
676 | }
677 |
678 | kern_return_t task_resume(
679 | 	task_t	task)
680 | {
681 | 	boolean_t	release;
682 |
683 | 	if (task == TASK_NULL)
684 | 		return KERN_INVALID_ARGUMENT;
685 |
686 | 	release = FALSE;
687 | 	task_lock(task);
688 | 	if (task->user_stop_count > 0) {
689 | 		if (--(task->user_stop_count) == 0)
690 | 			release = TRUE;
691 | 	}
692 | 	else {
693 | 		task_unlock(task);
694 | 		return KERN_FAILURE;
695 | 	}
696 | 	task_unlock(task);
697 |
698 | 	/*
699 | 	 * Release the task if necessary.
700 | 	 */
701 | 	if (release)
702 | 		return task_release(task);
703 |
704 | 	return KERN_SUCCESS;
705 | }
706 |
707 | kern_return_t task_info(
708 | 	task_t		task,
709 | 	int		flavor,
710 | 	task_info_t	task_info_out,	/* pointer to OUT array */
711 | 	natural_t	*task_info_count)	/* IN/OUT */
712 | {
713 | 	vm_map_t	map;
714 |
715 | 	if (task == TASK_NULL)
716 | 		return KERN_INVALID_ARGUMENT;
717 |
718 | 	switch (flavor) {
719 | 	    case TASK_BASIC_INFO:
720 | 	    {
721 | 		task_basic_info_t	basic_info;
722 |
723 | 		/* Allow *task_info_count to be two words smaller than
724 | 		   the usual amount, because creation_time is a new member
725 | 		   that some callers might not know about. */
726 |
727 | 		if (*task_info_count < TASK_BASIC_INFO_COUNT - 2) {
728 | 			return KERN_INVALID_ARGUMENT;
729 | 		}
730 |
731 | 		basic_info = (task_basic_info_t) task_info_out;
732 |
733 | 		map = (task == kernel_task) ? kernel_map : task->map;
734 |
735 | 		basic_info->virtual_size = map->size;
736 | 		basic_info->resident_size = pmap_resident_count(map->pmap)
737 | 						* PAGE_SIZE;
738 |
739 | 		task_lock(task);
740 | 		basic_info->base_priority = task->priority;
741 | 		basic_info->suspend_count = task->user_stop_count;
742 | 		basic_info->user_time.seconds
743 | 				= task->total_user_time.seconds;
744 | 		basic_info->user_time.microseconds
745 | 				= task->total_user_time.microseconds;
746 | 		basic_info->system_time.seconds
747 | 				= task->total_system_time.seconds;
748 | 		basic_info->system_time.microseconds
749 | 				= task->total_system_time.microseconds;
750 | 		basic_info->creation_time = task->creation_time;
751 | 		task_unlock(task);
752 |
753 | 		if (*task_info_count > TASK_BASIC_INFO_COUNT)
754 | 			*task_info_count = TASK_BASIC_INFO_COUNT;
755 | 		break;
756 | 	    }
757 |
758 | 	    case TASK_EVENTS_INFO:
759 | 	    {
760 | 		task_events_info_t	event_info;
761 |
762 | 		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
763 | 			return KERN_INVALID_ARGUMENT;
764 | 		}
765 |
766 | 		event_info = (task_events_info_t) task_info_out;
767 |
768 | 		task_lock(task);
769 | 		event_info->faults = task->faults;
770 | 		event_info->zero_fills = task->zero_fills;
771 | 		event_info->reactivations = task->reactivations;
772 | 		event_info->pageins = task->pageins;
773 | 		event_info->cow_faults = task->cow_faults;
774 | 		event_info->messages_sent = task->messages_sent;
775 | 		event_info->messages_received = task->messages_received;
776 | 		task_unlock(task);
777 |
778 | 		*task_info_count = TASK_EVENTS_INFO_COUNT;
779 | 		break;
780 | 	    }
781 |
782 | 	    case TASK_THREAD_TIMES_INFO:
783 | 	    {
784 | 		task_thread_times_info_t times_info;
785 | 		thread_t	thread;
786 |
787 | 		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
788 | 			return KERN_INVALID_ARGUMENT;
789 | 		}
790 |
791 | 		times_info = (task_thread_times_info_t) task_info_out;
792 | 		times_info->user_time.seconds = 0;
793 | 		times_info->user_time.microseconds = 0;
794 | 		times_info->system_time.seconds = 0;
795 | 		times_info->system_time.microseconds = 0;
796 |
797 | 		task_lock(task);
798 | 		queue_iterate(&task->thread_list, thread,
799 | 			      thread_t, thread_list)
800 | 		{
801 | 			time_value_t	user_time, system_time;
802 | 			spl_t		s;
803 |
804 | 			s = splsched();
805 | 			thread_lock(thread);
806 |
807 | 			thread_read_times(thread, &user_time, &system_time);
808 |
809 | 			thread_unlock(thread);
810 | 			splx(s);
811 |
812 | 			time_value_add(&times_info->user_time, &user_time);
813 | 			time_value_add(&times_info->system_time, &system_time);
814 | 		}
815 | 		task_unlock(task);
816 |
817 | 		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
818 | 		break;
819 | 	    }
820 |
821 | 	    default:
822 | 		return KERN_INVALID_ARGUMENT;
823 | 	}
824 |
825 | 	return KERN_SUCCESS;
826 | }
827 |
828 | #if	MACH_HOST
829 | /*
830 |  *	task_assign:
831 |  *
832 |  *	Change the assigned processor set for the task
833 |  */
834 | kern_return_t
835 | task_assign(
836 | 	task_t		task,
837 | 	processor_set_t	new_pset,
838 | 	boolean_t	assign_threads)
839 | {
840 | 	kern_return_t	ret = KERN_SUCCESS;
841 | 	thread_t	thread, prev_thread;
842 | 	queue_head_t	*list;
843 | 	processor_set_t	pset;
844 |
845 | 	if (task == TASK_NULL || new_pset == PROCESSOR_SET_NULL) {
846 | 		return KERN_INVALID_ARGUMENT;
847 | 	}
848 |
849 | 	/*
850 | 	 * Freeze task`s assignment.  Prelude to assigning
851 | 	 * task.  Only one freeze may be held per task.
852 | 	 */
853 |
854 | 	task_lock(task);
855 | 	while (task->may_assign == FALSE) {
856 | 		task->assign_active = TRUE;
857 | 		assert_wait((event_t)&task->assign_active, TRUE);
858 | 		task_unlock(task);
859 | 		thread_block((void (*)()) 0);
860 | 		task_lock(task);
861 | 	}
862 |
863 | 	/*
864 | 	 * Avoid work if task already in this processor set.
865 | 	 */
866 | 	if (task->processor_set == new_pset) {
867 | 		/*
868 | 		 * No need for task->assign_active wakeup:
869 | 		 * task->may_assign is still TRUE.
870 | 		 */
871 | 		task_unlock(task);
872 | 		return KERN_SUCCESS;
873 | 	}
874 |
875 | 	task->may_assign = FALSE;
876 | 	task_unlock(task);
877 |
878 | 	/*
879 | 	 * Safe to get the task`s pset: it cannot change while
880 | 	 * task is frozen.
881 | 	 */
882 | 	pset = task->processor_set;
883 |
884 | 	/*
885 | 	 * Lock both psets now.  Use ordering to avoid deadlock.
886 | 	 */
887 | Restart:
888 | 	if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
889 | 		pset_lock(pset);
890 | 		pset_lock(new_pset);
891 | 	}
892 | 	else {
893 | 		pset_lock(new_pset);
894 | 		pset_lock(pset);
895 | 	}
896 |
897 | 	/*
898 | 	 * Check if new_pset is ok to assign to.  If not,
899 | 	 * reassign to default_pset.
900 | 	 */
901 | 	if (!new_pset->active) {
902 | 		pset_unlock(pset);
903 | 		pset_unlock(new_pset);
904 | 		new_pset = &default_pset;
905 | 		goto Restart;
906 | 	}
907 |
908 | 	pset_reference(new_pset);
909 |
910 | 	/*
911 | 	 * Now grab the task lock and move the task.
912 | 	 */
913 |
914 | 	task_lock(task);
915 | 	pset_remove_task(pset, task);
916 | 	pset_add_task(new_pset, task);
917 |
918 | 	pset_unlock(pset);
919 | 	pset_unlock(new_pset);
920 |
921 | 	if (assign_threads == FALSE) {
922 | 		/*
923 | 		 * We leave existing threads at their
924 | 		 * old assignments.  Unfreeze task`s
925 | 		 * assignment.
926 | 		 */
927 | 		task->may_assign = TRUE;
928 | 		if (task->assign_active) {
929 | 			task->assign_active = FALSE;
930 | 			thread_wakeup((event_t) &task->assign_active);
931 | 		}
932 | 		task_unlock(task);
933 | 		pset_deallocate(pset);
934 | 		return KERN_SUCCESS;
935 | 	}
936 |
937 | 	/*
938 | 	 * If current thread is in task, freeze its assignment.
939 | 	 */
940 | 	if (current_thread()->task == task) {
941 | 		task_unlock(task);
942 | 		thread_freeze(current_thread());
943 | 		task_lock(task);
944 | 	}
945 |
946 | 	/*
947 | 	 * Iterate down the thread list reassigning all the threads.
948 | 	 * New threads pick up task's new processor set automatically.
949 | 	 * Do current thread last because new pset may be empty.
950 | 	 */
951 | 	list = &task->thread_list;
952 | 	prev_thread = THREAD_NULL;
953 | 	queue_iterate(list, thread, thread_t, thread_list) {
954 | 		if (!(task->active)) {
955 | 			ret = KERN_FAILURE;
956 | 			break;
957 | 		}
958 | 		if (thread != current_thread()) {
959 | 			thread_reference(thread);
960 | 			task_unlock(task);
961 | 			if (prev_thread != THREAD_NULL)
962 | 				thread_deallocate(prev_thread);	/* may block */
963 | 			thread_assign(thread,new_pset);		/* may block */
964 | 			prev_thread = thread;
965 | 			task_lock(task);
966 | 		}
967 | 	}
968 |
969 | 	/*
970 | 	 * Done, wakeup anyone waiting for us.
971 | 	 */
972 | 	task->may_assign = TRUE;
973 | 	if (task->assign_active) {
974 | 		task->assign_active = FALSE;
975 | 		thread_wakeup((event_t)&task->assign_active);
976 | 	}
977 | 	task_unlock(task);
978 | 	if (prev_thread != THREAD_NULL)
979 | 		thread_deallocate(prev_thread);		/* may block */
980 |
981 | 	/*
982 | 	 * Finish assignment of current thread.
983 | 	 */
984 | 	if (current_thread()->task == task)
985 | 		thread_doassign(current_thread(), new_pset, TRUE);
986 |
987 | 	pset_deallocate(pset);
988 |
989 | 	return ret;
990 | }
991 | #else	/* MACH_HOST */
992 | /*
993 |  *	task_assign:
994 |  *
995 |  *	Change the assigned processor set for the task
996 |  */
997 | kern_return_t
998 | task_assign(
999 | 	task_t		task,
1000 | 	processor_set_t	new_pset,
1001 | 	boolean_t	assign_threads)
1002 | {
1003 | 	return KERN_FAILURE;
1004 | }
1005 | #endif	/* MACH_HOST */
1006 |
1007 |
1008 | /*
1009 |  *	task_assign_default:
1010 |  *
1011 |  *	Version of task_assign to assign to default processor set.
1012 |  */
1013 | kern_return_t
1014 | task_assign_default(
1015 | 	task_t		task,
1016 | 	boolean_t	assign_threads)
1017 | {
1018 | 	return task_assign(task, &default_pset, assign_threads);
1019 | }
1020 |
1021 | /*
1022 |  *	task_get_assignment
1023 |  *
1024 |  *	Return name of processor set that task is assigned to.
1025 |  */
1026 | kern_return_t task_get_assignment(
1027 | 	task_t		task,
1028 | 	processor_set_t	*pset)
1029 | {
1030 | 	if (!task->active)
1031 | 		return KERN_FAILURE;
1032 |
1033 | 	*pset = task->processor_set;
1034 | 	pset_reference(*pset);
1035 | 	return KERN_SUCCESS;
1036 | }
1037 |
1038 | /*
1039 |  *	task_priority
1040 |  *
1041 |  *	Set priority of task; used only for newly created threads.
1042 |  *	Optionally change priorities of threads.
1043 |  */
1044 | kern_return_t
1045 | task_priority(
1046 | 	task_t		task,
1047 | 	int		priority,
1048 | 	boolean_t	change_threads)
1049 | {
1050 | 	kern_return_t	ret = KERN_SUCCESS;
1051 |
1052 | 	if (task == TASK_NULL || invalid_pri(priority))
1053 | 		return KERN_INVALID_ARGUMENT;
1054 |
1055 | 	task_lock(task);
1056 | 	task->priority = priority;
1057 |
1058 | 	if (change_threads) {
1059 | 		thread_t	thread;
1060 | 		queue_head_t	*list;
1061 |
1062 | 		list = &task->thread_list;
1063 | 		queue_iterate(list, thread, thread_t, thread_list) {
1064 | 			if (thread_priority(thread, priority, FALSE)
1065 | 			    != KERN_SUCCESS)
1066 | 				ret = KERN_FAILURE;
1067 | 		}
1068 | 	}
1069 |
1070 | 	task_unlock(task);
1071 | 	return ret;
1072 | }
1073 |
1074 | /*
1075 |  *	task_set_name
1076 |  *
1077 |  *	Set the name of task TASK to NAME.  This is a debugging aid.
1078 |  *	NAME will be used in error messages printed by the kernel.
1079 |  */
1080 | kern_return_t
1081 | task_set_name(
1082 | 	task_t			task,
1083 | 	kernel_debug_name_t	name)
1084 | {
1085 | 	strncpy(task->name, name, sizeof task->name - 1);
1086 | 	task->name[sizeof task->name - 1] = '\0';
1087 | 	return KERN_SUCCESS;
1088 | }
1089 |
1090 | /*
1091 |  *	task_collect_scan:
1092 |  *
1093 |  *	Attempt to free resources owned by tasks.
1094 |  */
1095 |
1096 | void task_collect_scan(void)
1097 | {
1098 | 	task_t			task, prev_task;
1099 | 	processor_set_t		pset, prev_pset;
1100 |
1101 | 	prev_task = TASK_NULL;
1102 | 	prev_pset = PROCESSOR_SET_NULL;
1103 |
1104 | 	simple_lock(&all_psets_lock);
1105 | 	queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
1106 | 		pset_lock(pset);
1107 | 		queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
1108 | 			task_reference(task);
1109 | 			pset_reference(pset);
1110 | 			pset_unlock(pset);
1111 | 			simple_unlock(&all_psets_lock);
1112 |
1113 | 			machine_task_collect (task);
1114 | 			pmap_collect(task->map->pmap);
1115 |
1116 | 			if (prev_task != TASK_NULL)
1117 | 				task_deallocate(prev_task);
1118 | 			prev_task = task;
1119 |
1120 | 			if (prev_pset != PROCESSOR_SET_NULL)
1121 | 				pset_deallocate(prev_pset);
1122 | 			prev_pset = pset;
1123 |
1124 | 			simple_lock(&all_psets_lock);
1125 | 			pset_lock(pset);
1126 | 		}
1127 | 		pset_unlock(pset);
1128 | 	}
1129 | 	simple_unlock(&all_psets_lock);
1130 |
1131 | 	if (prev_task != TASK_NULL)
1132 | 		task_deallocate(prev_task);
1133 | 	if (prev_pset != PROCESSOR_SET_NULL)
1134 | 		pset_deallocate(prev_pset);
1135 | }
1136 |
1137 | boolean_t task_collect_allowed = TRUE;
1138 | unsigned task_collect_last_tick = 0;
1139 | unsigned task_collect_max_rate = 0;	/* in ticks */
1140 |
1141 | /*
1142 |  *	consider_task_collect:
1143 |  *
1144 |  *	Called by the pageout daemon when the system needs more free pages.
1145 |  */
1146 |
1147 | void consider_task_collect(void)
1148 | {
1149 | 	/*
1150 | 	 * By default, don't attempt task collection more frequently
1151 | 	 * than once a second.
1152 | 	 */
1153 |
1154 | 	if (task_collect_max_rate == 0)
1155 | 		task_collect_max_rate = hz;
1156 |
1157 | 	if (task_collect_allowed &&
1158 | 	    (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
1159 | 		task_collect_last_tick = sched_tick;
1160 | 		task_collect_scan();
1161 | 	}
1162 | }
1163 |
1164 | kern_return_t
1165 | task_ras_control(
1166 | 	task_t		task,
1167 | 	vm_offset_t	pc,
1168 | 	vm_offset_t	endpc,
1169 | 	int		flavor)
1170 | {
1171 | 	kern_return_t	ret = KERN_FAILURE;
1172 |
1173 | #if	FAST_TAS
1174 | 	int		i;
1175 |
1176 | 	ret = KERN_SUCCESS;
1177 | 	task_lock(task);
1178 | 	switch (flavor) {
1179 | 	case TASK_RAS_CONTROL_PURGE_ALL:	/* remove all RAS */
1180 | 		for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1181 | 			task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1182 | 		}
1183 | 		break;
1184 | 	case TASK_RAS_CONTROL_PURGE_ONE:	/* remove this RAS, collapse remaining */
1185 | 		for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1186 | 			if ( (task->fast_tas_base[i] == pc)
1187 | 			  && (task->fast_tas_end[i] == endpc)) {
1188 | 				while (i < TASK_FAST_TAS_NRAS-1) {
1189 | 					task->fast_tas_base[i] = task->fast_tas_base[i+1];
1190 | 					task->fast_tas_end[i] = task->fast_tas_end[i+1];
1191 | 					i++;
1192 | 				}
1193 | 				task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0;
1194 | 				task->fast_tas_end[TASK_FAST_TAS_NRAS-1] = 0;
1195 | 				break;
1196 | 			}
1197 | 		}
1198 | 		if (i == TASK_FAST_TAS_NRAS) {
1199 | 			ret = KERN_INVALID_ADDRESS;
1200 | 		}
1201 | 		break;
1202 | 	case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE:
1203 | 		/* remove all RAS and install this RAS */
1204 | 		for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1205 | 			task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
1206 | 		}
1207 | 		/* FALL THROUGH */
1208 | 	case TASK_RAS_CONTROL_INSTALL_ONE:	/* install this RAS */
1209 | 		for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
1210 | 			if ( (task->fast_tas_base[i] == pc)
1211 | 			  && (task->fast_tas_end[i] == endpc)) {
1212 | 				/* already installed */
1213 | 				break;
1214 | 			}
1215 | 			if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)) {
1216 | 				task->fast_tas_base[i] = pc;
1217 | 				task->fast_tas_end[i] = endpc;
1218 | 				break;
1219 | 			}
1220 | 		}
1221 | 		if (i == TASK_FAST_TAS_NRAS) {
1222 | 			ret = KERN_RESOURCE_SHORTAGE;
1223 | 		}
1224 | 		break;
1225 | 	default: ret = KERN_INVALID_VALUE;
1226 | 		break;
1227 | 	}
1228 | 	task_unlock(task);
1229 | #endif	/* FAST_TAS */
1230 | 	return ret;
1231 | }