| File: | obj-scan-build/../kern/task.c |
| Location: | line 1153, column 17 |
| Description: | Access to field 'map' results in a dereference of a null pointer (loaded from variable 'task') |
| 1 | /* | |||
| 2 | * Mach Operating System | |||
| 3 | * Copyright (c) 1993-1988 Carnegie Mellon University | |||
| 4 | * All Rights Reserved. | |||
| 5 | * | |||
| 6 | * Permission to use, copy, modify and distribute this software and its | |||
| 7 | * documentation is hereby granted, provided that both the copyright | |||
| 8 | * notice and this permission notice appear in all copies of the | |||
| 9 | * software, derivative works or modified versions, and any portions | |||
| 10 | * thereof, and that both notices appear in supporting documentation. | |||
| 11 | * | |||
| 12 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |||
| 13 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |||
| 14 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |||
| 15 | * | |||
| 16 | * Carnegie Mellon requests users of this software to return to | |||
| 17 | * | |||
| 18 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |||
| 19 | * School of Computer Science | |||
| 20 | * Carnegie Mellon University | |||
| 21 | * Pittsburgh PA 15213-3890 | |||
| 22 | * | |||
| 23 | * any improvements or extensions that they make and grant Carnegie Mellon | |||
| 24 | * the rights to redistribute these changes. | |||
| 25 | */ | |||
| 26 | /* | |||
| 27 | * File: kern/task.c | |||
| 28 | * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub, | |||
| 29 | * David Black | |||
| 30 | * | |||
| 31 | * Task management primitives implementation. | |||
| 32 | */ | |||
| 33 | ||||
| 34 | #include <string.h> | |||
| 35 | ||||
| 36 | #include <mach/machine/vm_types.h> | |||
| 37 | #include <mach/vm_param.h> | |||
| 38 | #include <mach/task_info.h> | |||
| 39 | #include <mach/task_special_ports.h> | |||
| 40 | #include <mach_debug/mach_debug_types.h> | |||
| 41 | #include <ipc/ipc_space.h> | |||
| 42 | #include <ipc/ipc_types.h> | |||
| 43 | #include <kern/debug.h> | |||
| 44 | #include <kern/task.h> | |||
| 45 | #include <kern/thread.h> | |||
| 46 | #include <kern/slab.h> | |||
| 47 | #include <kern/kalloc.h> | |||
| 48 | #include <kern/processor.h> | |||
| 49 | #include <kern/printf.h> | |||
| 50 | #include <kern/sched_prim.h> /* for thread_wakeup */ | |||
| 51 | #include <kern/ipc_tt.h> | |||
| 52 | #include <kern/syscall_emulation.h> | |||
| 53 | #include <kern/task_notify.user.h> | |||
| 54 | #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */ | |||
| 55 | #include <machine/machspl.h> /* for splsched */ | |||
| 56 | ||||
| 57 | task_t kernel_task = TASK_NULL; | |||
| 58 | struct kmem_cache task_cache; | |||
| 59 | ||||
| 60 | /* Where to send notifications about newly created tasks. */ | |||
| 61 | ipc_port_t new_task_notification = NULL; | |||
| 62 | ||||
| 63 | void task_init(void) | |||
| 64 | { | |||
| 65 | kmem_cache_init(&task_cache, "task", sizeof(struct task), 0, | |||
| 66 | NULL, NULL, NULL, 0); | |||
| 67 | ||||
| 68 | eml_init(); | |||
| 69 | machine_task_module_init (); | |||
| 70 | ||||
| 71 | /* | |||
| 72 | * Create the kernel task as the first task. | |||
| 73 | * Task_create must assign to kernel_task as a side effect, | |||
| 74 | * for other initialization. (:-() | |||
| 75 | */ | |||
| 76 | (void) task_create(TASK_NULL, FALSE, &kernel_task); | |||
| 77 | (void) task_set_name(kernel_task, "gnumach"); | |||
| 78 | } | |||
| 79 | ||||
| 80 | kern_return_t task_create( | |||
| 81 | task_t parent_task, | |||
| 82 | boolean_t inherit_memory, | |||
| 83 | task_t *child_task) /* OUT */ | |||
| 84 | { | |||
| 85 | task_t new_task; | |||
| 86 | processor_set_t pset; | |||
| 87 | #if FAST_TAS | |||
| 88 | int i; | |||
| 89 | #endif | |||
| 90 | ||||
| 91 | new_task = (task_t) kmem_cache_alloc(&task_cache); | |||
| 92 | if (new_task == TASK_NULL) | |||
| 93 | return KERN_RESOURCE_SHORTAGE; | |||
| 94 | ||||
| 95 | /* one ref for just being alive; one for our caller */ | |||
| 96 | new_task->ref_count = 2; | |||
| 97 | ||||
| 98 | if (child_task == &kernel_task) { | |||
| 99 | new_task->map = kernel_map; | |||
| 100 | } else if (inherit_memory) { | |||
| 101 | new_task->map = vm_map_fork(parent_task->map); | |||
| 102 | } else { | |||
| 103 | new_task->map = vm_map_create(pmap_create(0), | |||
| 104 | round_page(VM_MIN_ADDRESS), | |||
| 105 | trunc_page(VM_MAX_ADDRESS), TRUE); | |||
| 106 | } | |||
| 107 | ||||
| 108 | simple_lock_init(&new_task->lock); | |||
| 109 | queue_init(&new_task->thread_list); | |||
| 110 | new_task->suspend_count = 0; | |||
| 111 | new_task->active = TRUE; | |||
| 112 | new_task->user_stop_count = 0; | |||
| 113 | new_task->thread_count = 0; | |||
| 114 | new_task->faults = 0; | |||
| 115 | new_task->zero_fills = 0; | |||
| 116 | new_task->reactivations = 0; | |||
| 117 | new_task->pageins = 0; | |||
| 118 | new_task->cow_faults = 0; | |||
| 119 | new_task->messages_sent = 0; | |||
| 120 | new_task->messages_received = 0; | |||
| 121 | ||||
| 122 | eml_task_reference(new_task, parent_task); | |||
| 123 | ||||
| 124 | ipc_task_init(new_task, parent_task); | |||
| 125 | machine_task_init (new_task); | |||
| 126 | ||||
| 127 | new_task->total_user_time.seconds = 0; | |||
| 128 | new_task->total_user_time.microseconds = 0; | |||
| 129 | new_task->total_system_time.seconds = 0; | |||
| 130 | new_task->total_system_time.microseconds = 0; | |||
| 131 | ||||
| 132 | record_time_stamp (&new_task->creation_time); | |||
| 133 | ||||
| 134 | if (parent_task != TASK_NULL) { | |||
| 135 | task_lock(parent_task); | |||
| 136 | pset = parent_task->processor_set; | |||
| 137 | if (!pset->active) | |||
| 138 | pset = &default_pset; | |||
| 139 | pset_reference(pset); | |||
| 140 | new_task->priority = parent_task->priority; | |||
| 141 | task_unlock(parent_task); | |||
| 142 | } | |||
| 143 | else { | |||
| 144 | pset = &default_pset; | |||
| 145 | pset_reference(pset); | |||
| 146 | new_task->priority = BASEPRI_USER; | |||
| 147 | } | |||
| 148 | pset_lock(pset); | |||
| 149 | pset_add_task(pset, new_task); | |||
| 150 | pset_unlock(pset); | |||
| 151 | ||||
| 152 | new_task->may_assign = TRUE; | |||
| 153 | new_task->assign_active = FALSE; | |||
| 154 | ||||
| 155 | #if MACH_PCSAMPLE | |||
| 156 | new_task->pc_sample.buffer = 0; | |||
| 157 | new_task->pc_sample.seqno = 0; | |||
| 158 | new_task->pc_sample.sampletypes = 0; | |||
| 159 | #endif /* MACH_PCSAMPLE */ | |||
| 160 | ||||
| 161 | #if FAST_TAS | |||
| 162 | for (i = 0; i < TASK_FAST_TAS_NRAS; i++) { | |||
| 163 | if (inherit_memory) { | |||
| 164 | new_task->fast_tas_base[i] = parent_task->fast_tas_base[i]; | |||
| 165 | new_task->fast_tas_end[i] = parent_task->fast_tas_end[i]; | |||
| 166 | } else { | |||
| 167 | new_task->fast_tas_base[i] = (vm_offset_t)0; | |||
| 168 | new_task->fast_tas_end[i] = (vm_offset_t)0; | |||
| 169 | } | |||
| 170 | } | |||
| 171 | #endif /* FAST_TAS */ | |||
| 172 | ||||
| 173 | if (parent_task == TASK_NULL) | |||
| 174 | snprintf (new_task->name, sizeof new_task->name, "%p", | |||
| 175 | new_task); | |||
| 176 | else | |||
| 177 | snprintf (new_task->name, sizeof new_task->name, "(%.*s)", | |||
| 178 | (int) (sizeof new_task->name - 3), parent_task->name); | |||
| 179 | ||||
| 180 | if (new_task_notification != NULL) { | |||
| 181 | task_reference (new_task); | |||
| 182 | task_reference (parent_task); | |||
| 183 | mach_notify_new_task (new_task_notification, | |||
| 184 | convert_task_to_port (new_task), | |||
| 185 | convert_task_to_port (parent_task)); | |||
| 186 | } | |||
| 187 | ||||
| 188 | ipc_task_enable(new_task); | |||
| 189 | ||||
| 190 | *child_task = new_task; | |||
| 191 | return KERN_SUCCESS; | |||
| 192 | } | |||
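The same entry point is exported to user space (the kernel function above is what backs the RPC). A minimal user-level sketch, assuming a GNU Mach/Hurd build where `<mach.h>` and the three-argument `task_create` stub are available:

```c
/* Hedged sketch: create and tear down an empty child task.  The
 * task_create(parent, inherit_memory, &child) form matches the
 * kernel function above; error handling is minimal. */
#include <mach.h>
#include <stdio.h>

int main(void)
{
	task_t child;
	kern_return_t kr;

	/* FALSE selects the vm_map_create() branch above: the child
	 * gets an empty address space instead of a vm_map_fork copy. */
	kr = task_create(mach_task_self(), FALSE, &child);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_create: %d\n", kr);
		return 1;
	}

	task_terminate(child);			/* no threads yet */
	mach_port_deallocate(mach_task_self(), child);
	return 0;
}
```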
| 193 | ||||
| 194 | /* | |||
| 195 | * task_deallocate: | |||
| 196 | * | |||
| 197 | * Give up a reference to the specified task and destroy it if there | |||
| 198 | * are no other references left. It is assumed that the current thread | |||
| 199 | * is never in this task. | |||
| 200 | */ | |||
| 201 | void task_deallocate( | |||
| 202 | task_t task) | |||
| 203 | { | |||
| 204 | int c; | |||
| 205 | processor_set_t pset; | |||
| 206 | ||||
| 207 | if (task == TASK_NULL) | |||
| 208 | return; | |||
| 209 | ||||
| 210 | task_lock(task); | |||
| 211 | c = --(task->ref_count); | |||
| 212 | task_unlock(task); | |||
| 213 | if (c != 0) | |||
| 214 | return; | |||
| 215 | ||||
| 216 | machine_task_terminate (task); | |||
| 217 | ||||
| 218 | eml_task_deallocate(task); | |||
| 219 | ||||
| 220 | pset = task->processor_set; | |||
| 221 | pset_lock(pset); | |||
| 222 | pset_remove_task(pset,task); | |||
| 223 | pset_unlock(pset); | |||
| 224 | pset_deallocate(pset); | |||
| 225 | vm_map_deallocate(task->map); | |||
| 226 | is_release(task->itk_space); | |||
| 227 | kmem_cache_free(&task_cache, (vm_offset_t) task); | |||
| 228 | } | |||
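Every `task_reference` must eventually be balanced by exactly one `task_deallocate`; the structure above (drop the count under the lock, destroy outside it) is what makes that safe. A schematic of the intended usage, with a hypothetical `inspect_task()` standing in for any operation on the task:

```c
/* Sketch of the reference protocol; not code from this file. */
void example_use(task_t task)
{
	task_reference(task);	/* pin: task cannot be freed under us */
	inspect_task(task);	/* hypothetical work; may block */
	task_deallocate(task);	/* unpin; destroys on last reference */
}
```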
| 229 | ||||
| 230 | void task_reference( | |||
| 231 | task_t task) | |||
| 232 | { | |||
| 233 | if (task == TASK_NULL) | |||
| 234 | return; | |||
| 235 | ||||
| 236 | task_lock(task); | |||
| 237 | task->ref_count++; | |||
| 238 | task_unlock(task); | |||
| 239 | } | |||
| 240 | ||||
| 241 | /* | |||
| 242 | * task_terminate: | |||
| 243 | * | |||
| 244 | * Terminate the specified task. See comments on thread_terminate | |||
| 245 | * (kern/thread.c) about problems with terminating the "current task." | |||
| 246 | */ | |||
| 247 | kern_return_t task_terminate( | |||
| 248 | task_t task) | |||
| 249 | { | |||
| 250 | thread_t thread, cur_thread; | |||
| 251 | queue_head_t *list; | |||
| 252 | task_t cur_task; | |||
| 253 | spl_t s; | |||
| 254 | ||||
| 255 | if (task == TASK_NULL) | |||
| 256 | return KERN_INVALID_ARGUMENT; | |||
| 257 | ||||
| 258 | list = &task->thread_list; | |||
| 259 | cur_task = current_task(); | |||
| 260 | cur_thread = current_thread(); | |||
| 261 | ||||
| 262 | /* | |||
| 263 | * Deactivate task so that it can't be terminated again, | |||
| 264 | * and so lengthy operations in progress will abort. | |||
| 265 | * | |||
| 266 | * If the current thread is in this task, remove it from | |||
| 267 | * the task's thread list to keep the thread-termination | |||
| 268 | * loop simple. | |||
| 269 | */ | |||
| 270 | if (task == cur_task) { | |||
| 271 | task_lock(task); | |||
| 272 | if (!task->active) { | |||
| 273 | /* | |||
| 274 | * Task is already being terminated. | |||
| 275 | */ | |||
| 276 | task_unlock(task); | |||
| 277 | return KERN_FAILURE; | |||
| 278 | } | |||
| 279 | /* | |||
| 280 | * Make sure current thread is not being terminated. | |||
| 281 | */ | |||
| 282 | s = splsched(); | |||
| 283 | thread_lock(cur_thread); | |||
| 284 | if (!cur_thread->active) { | |||
| 285 | thread_unlock(cur_thread); | |||
| 286 | (void) splx(s); | |||
| 287 | task_unlock(task); | |||
| 288 | thread_terminate(cur_thread); | |||
| 289 | return KERN_FAILURE; | |||
| 290 | } | |||
| 291 | task_hold_locked(task); | |||
| 292 | task->active = FALSE; | |||
| 293 | queue_remove(list, cur_thread, thread_t, thread_list); | |||
| 294 | thread_unlock(cur_thread); | |||
| 295 | (void) splx(s); | |||
| 296 | task_unlock(task); | |||
| 297 | ||||
| 298 | /* | |||
| 299 | * Shut down this thread's ipc now because it must | |||
| 300 | * be left alone to terminate the task. | |||
| 301 | */ | |||
| 302 | ipc_thread_disable(cur_thread); | |||
| 303 | ipc_thread_terminate(cur_thread); | |||
| 304 | } | |||
| 305 | else { | |||
| 306 | /* | |||
| 307 | * Lock both current and victim task to check for | |||
| 308 | * potential deadlock. | |||
| 309 | */ | |||
| 310 | if ((vm_offset_t)task < (vm_offset_t)cur_task) { | |||
| 311 | task_lock(task); | |||
| 312 | task_lock(cur_task); | |||
| 313 | } | |||
| 314 | else { | |||
| 315 | task_lock(cur_task); | |||
| 316 | task_lock(task); | |||
| 317 | } | |||
| 318 | /* | |||
| 319 | * Check if current thread or task is being terminated. | |||
| 320 | */ | |||
| 321 | s = splsched(); | |||
| 322 | thread_lock(cur_thread); | |||
| 323 | if ((!cur_task->active) || (!cur_thread->active)) { | |||
| 324 | /* | |||
| 325 | * Current task or thread is being terminated. | |||
| 326 | */ | |||
| 327 | thread_unlock(cur_thread); | |||
| 328 | (void) splx(s); | |||
| 329 | task_unlock(task); | |||
| 330 | task_unlock(cur_task); | |||
| 331 | thread_terminate(cur_thread); | |||
| 332 | return KERN_FAILURE; | |||
| 333 | } | |||
| 334 | thread_unlock(cur_thread); | |||
| 335 | (void) splx(s); | |||
| 336 | task_unlock(cur_task); | |||
| 337 | ||||
| 338 | if (!task->active) { | |||
| 339 | /* | |||
| 340 | * Task is already being terminated. | |||
| 341 | */ | |||
| 342 | task_unlock(task); | |||
| 343 | return KERN_FAILURE; | |||
| 344 | } | |||
| 345 | task_hold_locked(task); | |||
| 346 | task->active = FALSE; | |||
| 347 | task_unlock(task); | |||
| 348 | } | |||
| 349 | ||||
| 350 | /* | |||
| 351 | * Prevent further execution of the task. ipc_task_disable | |||
| 352 | * prevents further task operations via the task port. | |||
| 353 | * If this is the current task, the current thread will | |||
| 354 | * be left running. | |||
| 355 | */ | |||
| 356 | (void) task_dowait(task,TRUE); /* may block */ | |||
| 357 | ipc_task_disable(task); | |||
| 358 | ||||
| 359 | /* | |||
| 360 | * Terminate each thread in the task. | |||
| 361 | * | |||
| 362 | * The task_port is closed down, so no more thread_create | |||
| 363 | * operations can be done. Thread_force_terminate closes the | |||
| 364 | * thread port for each thread; when that is done, the | |||
| 365 | * thread will eventually disappear. Thus the loop will | |||
| 366 | * terminate. Call thread_force_terminate instead of | |||
| 367 | * thread_terminate to avoid deadlock checks. Need | |||
| 368 | * to call thread_block() inside loop because some other | |||
| 369 | * thread (e.g., the reaper) may have to run to get rid | |||
| 370 | * of all references to the thread; it won't vanish from | |||
| 371 | * the task's thread list until the last one is gone. | |||
| 372 | */ | |||
| 373 | task_lock(task); | |||
| 374 | while (!queue_empty(list)) { | |||
| 375 | thread = (thread_t) queue_first(list); | |||
| 376 | thread_reference(thread); | |||
| 377 | task_unlock(task); | |||
| 378 | thread_force_terminate(thread); | |||
| 379 | thread_deallocate(thread); | |||
| 380 | thread_block((void (*)()) 0); | |||
| 381 | task_lock(task); | |||
| 382 | } | |||
| 383 | task_unlock(task); | |||
| 384 | ||||
| 385 | /* | |||
| 386 | * Shut down IPC. | |||
| 387 | */ | |||
| 388 | ipc_task_terminate(task); | |||
| 389 | ||||
| 390 | ||||
| 391 | /* | |||
| 392 | * Deallocate the task's reference to itself. | |||
| 393 | */ | |||
| 394 | task_deallocate(task); | |||
| 395 | ||||
| 396 | /* | |||
| 397 | * If the current thread is in this task, it has not yet | |||
| 398 | * been terminated (since it was removed from the task's | |||
| 399 | * thread-list). Put it back in the thread list (for | |||
| 400 | * completeness), and terminate it. Since it holds the | |||
| 401 | * last reference to the task, terminating it will deallocate | |||
| 402 | * the task. | |||
| 403 | */ | |||
| 404 | if (cur_thread->task == task) { | |||
| 405 | task_lock(task); | |||
| 406 | s = splsched(); | |||
| 407 | queue_enter(list, cur_thread, thread_t, thread_list); | |||
| 408 | (void) splx(s); | |||
| 409 | task_unlock(task); | |||
| 410 | (void) thread_terminate(cur_thread); | |||
| 411 | } | |||
| 412 | ||||
| 413 | return KERN_SUCCESS; | |||
| 414 | } | |||
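The two-task branch above orders the two `task_lock` calls by address so that two threads terminating each other's tasks cannot deadlock. The idiom, pulled out as a stand-alone sketch (helper name illustrative):

```c
#include <stdint.h>

/* Acquire two task locks in a globally consistent (address) order, so
 * concurrent lock_pair(a, b) and lock_pair(b, a) cannot ABBA-deadlock. */
static void task_lock_pair(task_t t1, task_t t2)
{
	if ((uintptr_t) t1 < (uintptr_t) t2) {
		task_lock(t1);
		task_lock(t2);
	} else {
		task_lock(t2);
		task_lock(t1);
	}
}
```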
| 415 | ||||
| 416 | /* | |||
| 417 | * task_hold_locked: | |||
| 418 | * | |||
| 419 | * Suspend execution of the specified task. | |||
| 420 | * This is a recursive-style suspension of the task, a count of | |||
| 421 | * suspends is maintained. | |||
| 422 | * | |||
| 423 | * CONDITIONS: the task is locked and active. | |||
| 424 | */ | |||
| 425 | void task_hold_locked( | |||
| 426 | task_t task) | |||
| 427 | { | |||
| 428 | queue_head_t *list; | |||
| 429 | thread_t thread, cur_thread; | |||
| 430 | ||||
| 431 | assert(task->active); | |||
| 432 | ||||
| 433 | cur_thread = current_thread(); | |||
| 434 | ||||
| 435 | task->suspend_count++; | |||
| 436 | ||||
| 437 | /* | |||
| 438 | * Iterate through all the threads and hold them. | |||
| 439 | * Do not hold the current thread if it is within the | |||
| 440 | * task. | |||
| 441 | */ | |||
| 442 | list = &task->thread_list; | |||
| 443 | queue_iterate(list, thread, thread_t, thread_list) { | |||
| 444 | if (thread != cur_thread) | |||
| 445 | thread_hold(thread); | |||
| 446 | } | |||
| 447 | } | |||
| 448 | ||||
| 449 | /* | |||
| 450 | * task_hold: | |||
| 451 | * | |||
| 452 | * Suspend execution of the specified task. | |||
| 453 | * This is a recursive-style suspension of the task, a count of | |||
| 454 | * suspends is maintained. | |||
| 455 | */ | |||
| 456 | kern_return_t task_hold( | |||
| 457 | task_t task) | |||
| 458 | { | |||
| 459 | task_lock(task); | |||
| 460 | if (!task->active) { | |||
| 461 | task_unlock(task); | |||
| 462 | return KERN_FAILURE; | |||
| 463 | } | |||
| 464 | ||||
| 465 | task_hold_locked(task); | |||
| 466 | ||||
| 467 | task_unlock(task); | |||
| 468 | return KERN_SUCCESS; | |||
| 469 | } | |||
| 470 | ||||
| 471 | /* | |||
| 472 | * task_dowait: | |||
| 473 | * | |||
| 474 | * Wait until the task has really been suspended (all of the threads | |||
| 475 | * are stopped). Skip the current thread if it is within the task. | |||
| 476 | * | |||
| 477 | * If task is deactivated while waiting, return a failure code unless | |||
| 478 | * must_wait is true. | |||
| 479 | */ | |||
| 480 | kern_return_t task_dowait( | |||
| 481 | task_t task, | |||
| 482 | boolean_t must_wait) | |||
| 483 | { | |||
| 484 | queue_head_t *list; | |||
| 485 | thread_t thread, cur_thread, prev_thread; | |||
| 486 | kern_return_t ret = KERN_SUCCESS; | |||
| 487 | ||||
| 488 | /* | |||
| 489 | * Iterate through all the threads. | |||
| 490 | * While waiting for each thread, we gain a reference to it | |||
| 491 | * to prevent it from going away on us. This guarantees | |||
| 492 | * that the "next" thread in the list will be a valid thread. | |||
| 493 | * | |||
| 494 | * We depend on the fact that if threads are created while | |||
| 495 | * we are looping through the threads, they will be held | |||
| 496 | * automatically. We don't care about threads that get | |||
| 497 | * deallocated along the way (the reference prevents it | |||
| 498 | * from happening to the thread we are working with). | |||
| 499 | * | |||
| 500 | * If the current thread is in the affected task, it is skipped. | |||
| 501 | * | |||
| 502 | * If the task is deactivated before we're done, and we don't | |||
| 503 | * have to wait for it (must_wait is FALSE), just bail out. | |||
| 504 | */ | |||
| 505 | cur_thread = current_thread(); | |||
| 506 | ||||
| 507 | list = &task->thread_list; | |||
| 508 | prev_thread = THREAD_NULL; | |||
| 509 | task_lock(task); | |||
| 510 | queue_iterate(list, thread, thread_t, thread_list) { | |||
| 511 | if (!(task->active) && !(must_wait)) { | |||
| 512 | ret = KERN_FAILURE; | |||
| 513 | break; | |||
| 514 | } | |||
| 515 | if (thread != cur_thread) { | |||
| 516 | thread_reference(thread); | |||
| 517 | task_unlock(task); | |||
| 518 | if (prev_thread != THREAD_NULL) | |||
| 519 | thread_deallocate(prev_thread); | |||
| 520 | /* may block */ | |||
| 521 | (void) thread_dowait(thread, TRUE); /* may block */ | |||
| 522 | prev_thread = thread; | |||
| 523 | task_lock(task); | |||
| 524 | } | |||
| 525 | } | |||
| 526 | task_unlock(task); | |||
| 527 | if (prev_thread != THREAD_NULL) | |||
| 528 | thread_deallocate(prev_thread); /* may block */ | |||
| 529 | return ret; | |||
| 530 | } | |||
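The loop above is the standard Mach pattern for walking a lock-protected list when the per-element work may block: pin the current element with a reference before unlocking, and release the previous element only once it is safe. A self-contained toy model of the same idiom (pthreads stand in for the kernel lock; the refcounting is simplified to run under the list lock):

```c
#include <stdlib.h>
#include <pthread.h>

struct node {
	struct node *next;
	int refs;			/* protected by list_lock here */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void node_unref(struct node *n)
{
	if (--n->refs == 0)
		free(n);
}

static void blocking_work(struct node *n) { (void) n; /* may block */ }

void visit_all(void)
{
	struct node *n, *prev = NULL;

	pthread_mutex_lock(&list_lock);
	for (n = list_head; n != NULL; n = n->next) {
		n->refs++;			/* pin current node */
		pthread_mutex_unlock(&list_lock);
		blocking_work(n);		/* list lock not held */
		pthread_mutex_lock(&list_lock);
		if (prev != NULL)
			node_unref(prev);	/* safe to drop old pin */
		prev = n;			/* n->next is still valid */
	}
	if (prev != NULL)
		node_unref(prev);
	pthread_mutex_unlock(&list_lock);
}
```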
| 531 | ||||
| 532 | kern_return_t task_release( | |||
| 533 | task_t task) | |||
| 534 | { | |||
| 535 | queue_head_t *list; | |||
| 536 | thread_t thread, next; | |||
| 537 | ||||
| 538 | task_lock(task); | |||
| 539 | if (!task->active) { | |||
| 540 | task_unlock(task); | |||
| 541 | return KERN_FAILURE; | |||
| 542 | } | |||
| 543 | ||||
| 544 | task->suspend_count--; | |||
| 545 | ||||
| 546 | /* | |||
| 547 | * Iterate through all the threads and release them | |||
| 548 | */ | |||
| 549 | list = &task->thread_list; | |||
| 550 | thread = (thread_t) queue_first(list); | |||
| 551 | while (!queue_end(list, (queue_entry_t) thread)) { | |||
| 552 | next = (thread_t) queue_next(&thread->thread_list); | |||
| 553 | thread_release(thread); | |||
| 554 | thread = next; | |||
| 555 | } | |||
| 556 | task_unlock(task); | |||
| 557 | return KERN_SUCCESS; | |||
| 558 | } | |||
| 559 | ||||
| 560 | kern_return_t task_threads( | |||
| 561 | task_t task, | |||
| 562 | thread_array_t *thread_list, | |||
| 563 | natural_t *count) | |||
| 564 | { | |||
| 565 | unsigned int actual; /* this many threads */ | |||
| 566 | thread_t thread; | |||
| 567 | thread_t *threads; | |||
| 568 | int i; | |||
| 569 | ||||
| 570 | vm_size_t size, size_needed; | |||
| 571 | vm_offset_t addr; | |||
| 572 | ||||
| 573 | if (task == TASK_NULL) | |||
| 574 | return KERN_INVALID_ARGUMENT; | |||
| 575 | ||||
| 576 | size = 0; addr = 0; | |||
| 577 | ||||
| 578 | for (;;) { | |||
| 579 | task_lock(task); | |||
| 580 | if (!task->active) { | |||
| 581 | task_unlock(task); | |||
| 582 | return KERN_FAILURE; | |||
| 583 | } | |||
| 584 | ||||
| 585 | actual = task->thread_count; | |||
| 586 | ||||
| 587 | /* do we have the memory we need? */ | |||
| 588 | ||||
| 589 | size_needed = actual * sizeof(mach_port_t); | |||
| 590 | if (size_needed <= size) | |||
| 591 | break; | |||
| 592 | ||||
| 593 | /* unlock the task and allocate more memory */ | |||
| 594 | task_unlock(task); | |||
| 595 | ||||
| 596 | if (size != 0) | |||
| 597 | kfree(addr, size); | |||
| 598 | ||||
| 599 | assert(size_needed > 0); | |||
| 600 | size = size_needed; | |||
| 601 | ||||
| 602 | addr = kalloc(size); | |||
| 603 | if (addr == 0) | |||
| 604 | return KERN_RESOURCE_SHORTAGE; | |||
| 605 | } | |||
| 606 | ||||
| 607 | /* OK, have memory and the task is locked & active */ | |||
| 608 | ||||
| 609 | threads = (thread_t *) addr; | |||
| 610 | ||||
| 611 | for (i = 0, thread = (thread_t) queue_first(&task->thread_list); | |||
| 612 | i < actual; | |||
| 613 | i++, thread = (thread_t) queue_next(&thread->thread_list)) { | |||
| 614 | /* take ref for convert_thread_to_port */ | |||
| 615 | thread_reference(thread); | |||
| 616 | threads[i] = thread; | |||
| 617 | } | |||
| 618 | assert(queue_end(&task->thread_list, (queue_entry_t) thread)); | |||
| 619 | ||||
| 620 | /* can unlock task now that we've got the thread refs */ | |||
| 621 | task_unlock(task); | |||
| 622 | ||||
| 623 | if (actual == 0) { | |||
| 624 | /* no threads, so return null pointer and deallocate memory */ | |||
| 625 | ||||
| 626 | *thread_list = 0; | |||
| 627 | *count = 0; | |||
| 628 | ||||
| 629 | if (size != 0) | |||
| 630 | kfree(addr, size); | |||
| 631 | } else { | |||
| 632 | /* if we allocated too much, must copy */ | |||
| 633 | ||||
| 634 | if (size_needed < size) { | |||
| 635 | vm_offset_t newaddr; | |||
| 636 | ||||
| 637 | newaddr = kalloc(size_needed); | |||
| 638 | if (newaddr == 0) { | |||
| 639 | for (i = 0; i < actual; i++) | |||
| 640 | thread_deallocate(threads[i]); | |||
| 641 | kfree(addr, size); | |||
| 642 | return KERN_RESOURCE_SHORTAGE; | |||
| 643 | } | |||
| 644 | ||||
| 645 | memcpy((void *) newaddr, (void *) addr, size_needed); | |||
| 646 | kfree(addr, size); | |||
| 647 | threads = (thread_t *) newaddr; | |||
| 648 | } | |||
| 649 | ||||
| 650 | *thread_list = (mach_port_t *) threads; | |||
| 651 | *count = actual; | |||
| 652 | ||||
| 653 | /* do the conversion that Mig should handle */ | |||
| 654 | ||||
| 655 | for (i = 0; i < actual; i++) | |||
| 656 | ((ipc_port_t *) threads)[i] = | |||
| 657 | convert_thread_to_port(threads[i]); | |||
| 658 | } | |||
| 659 | ||||
| 660 | return KERN_SUCCESS; | |||
| 661 | } | |||
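On the receiving side the array arrives as out-of-line memory and each slot carries a send right (one per `thread_reference` taken above), so user code must release both. A hedged sketch:

```c
/* Hedged sketch: enumerate and release the threads of a task we hold
 * a port for.  Assumes the standard Mach user interface. */
#include <mach.h>

void drop_thread_list(task_t task)
{
	thread_array_t threads;
	mach_msg_type_number_t count, i;

	if (task_threads(task, &threads, &count) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		mach_port_deallocate(mach_task_self(), threads[i]);

	/* the array itself is out-of-line VM in our address space */
	vm_deallocate(mach_task_self(), (vm_address_t) threads,
		      count * sizeof(*threads));
}
```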
| 662 | ||||
| 663 | kern_return_t task_suspend( | |||
| 664 | task_t task) | |||
| 665 | { | |||
| 666 | boolean_t hold; | |||
| 667 | ||||
| 668 | if (task == TASK_NULL) | |||
| 669 | return KERN_INVALID_ARGUMENT; | |||
| 670 | ||||
| 671 | hold = FALSE; | |||
| 672 | task_lock(task); | |||
| 673 | if ((task->user_stop_count)++ == 0) | |||
| 674 | hold = TRUE; | |||
| 675 | task_unlock(task); | |||
| 676 | ||||
| 677 | /* | |||
| 678 | * If the stop count was positive, the task is | |||
| 679 | * already stopped and we can exit. | |||
| 680 | */ | |||
| 681 | if (!hold) { | |||
| 682 | return KERN_SUCCESS; | |||
| 683 | } | |||
| 684 | ||||
| 685 | /* | |||
| 686 | * Hold all of the threads in the task, and wait for | |||
| 687 | * them to stop. If the current thread is within | |||
| 688 | * this task, hold it separately so that all of the | |||
| 689 | * other threads can stop first. | |||
| 690 | */ | |||
| 691 | ||||
| 692 | if (task_hold(task) != KERN_SUCCESS) | |||
| 693 | return KERN_FAILURE; | |||
| 694 | ||||
| 695 | if (task_dowait(task, FALSE) != KERN_SUCCESS) | |||
| 696 | return KERN_FAILURE; | |||
| 697 | ||||
| 698 | if (current_task() == task) { | |||
| 699 | spl_t s; | |||
| 700 | ||||
| 701 | thread_hold(current_thread()); | |||
| 702 | /* | |||
| 703 | * We want to call thread_block on our way out, | |||
| 704 | * to stop running. | |||
| 705 | */ | |||
| 706 | s = splsched(); | |||
| 707 | ast_on(cpu_number(), AST_BLOCK); | |||
| 708 | (void) splx(s); | |||
| 709 | } | |||
| 710 | ||||
| 711 | return KERN_SUCCESS; | |||
| 712 | } | |||
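Since suspension is counted (`user_stop_count`), suspends nest and only the 0 -> 1 transition actually holds threads; symmetric resumes are required before the task runs again. A sketch, assuming a `child` task port obtained as above:

```c
/* Hedged sketch of the counted stop semantics. */
task_suspend(child);	/* 0 -> 1: threads held and waited on */
task_suspend(child);	/* 1 -> 2: already stopped, returns at once */

task_resume(child);	/* 2 -> 1: still stopped */
task_resume(child);	/* 1 -> 0: task_release() lets threads run */

/* One more task_resume(child) would return KERN_FAILURE: count is 0. */
```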
| 713 | ||||
| 714 | kern_return_t task_resume( | |||
| 715 | task_t task) | |||
| 716 | { | |||
| 717 | boolean_t release; | |||
| 718 | ||||
| 719 | if (task == TASK_NULL) | |||
| 720 | return KERN_INVALID_ARGUMENT; | |||
| 721 | ||||
| 722 | release = FALSE; | |||
| 723 | task_lock(task); | |||
| 724 | if (task->user_stop_count > 0) { | |||
| 725 | if (--(task->user_stop_count) == 0) | |||
| 726 | release = TRUE; | |||
| 727 | } | |||
| 728 | else { | |||
| 729 | task_unlock(task); | |||
| 730 | return KERN_FAILURE; | |||
| 731 | } | |||
| 732 | task_unlock(task); | |||
| 733 | ||||
| 734 | /* | |||
| 735 | * Release the task if necessary. | |||
| 736 | */ | |||
| 737 | if (release) | |||
| 738 | return task_release(task); | |||
| 739 | ||||
| 740 | return KERN_SUCCESS; | |||
| 741 | } | |||
| 742 | ||||
| 743 | kern_return_t task_info( | |||
| 744 | task_t task, | |||
| 745 | int flavor, | |||
| 746 | task_info_t task_info_out, /* pointer to OUT array */ | |||
| 747 | natural_t *task_info_count) /* IN/OUT */ | |||
| 748 | { | |||
| 749 | vm_map_t map; | |||
| 750 | ||||
| 751 | if (task == TASK_NULL) | |||
| 752 | return KERN_INVALID_ARGUMENT; | |||
| 753 | ||||
| 754 | switch (flavor) { | |||
| 755 | case TASK_BASIC_INFO: | |||
| 756 | { | |||
| 757 | task_basic_info_t basic_info; | |||
| 758 | ||||
| 759 | /* Allow *task_info_count to be two words smaller than | |||
| 760 | the usual amount, because creation_time is a new member | |||
| 761 | that some callers might not know about. */ | |||
| 762 | ||||
| 763 | if (*task_info_count < TASK_BASIC_INFO_COUNT - 2) { | |||
| 764 | return KERN_INVALID_ARGUMENT; | |||
| 765 | } | |||
| 766 | ||||
| 767 | basic_info = (task_basic_info_t) task_info_out; | |||
| 768 | ||||
| 769 | map = (task == kernel_task) ? kernel_map : task->map; | |||
| 770 | ||||
| 771 | basic_info->virtual_size = map->size; | |||
| 772 | basic_info->resident_size = pmap_resident_count(map->pmap) | |||
| 773 | * PAGE_SIZE; | |||
| 774 | ||||
| 775 | task_lock(task); | |||
| 776 | basic_info->base_priority = task->priority; | |||
| 777 | basic_info->suspend_count = task->user_stop_count; | |||
| 778 | basic_info->user_time.seconds | |||
| 779 | = task->total_user_time.seconds; | |||
| 780 | basic_info->user_time.microseconds | |||
| 781 | = task->total_user_time.microseconds; | |||
| 782 | basic_info->system_time.seconds | |||
| 783 | = task->total_system_time.seconds; | |||
| 784 | basic_info->system_time.microseconds | |||
| 785 | = task->total_system_time.microseconds; | |||
| 786 | basic_info->creation_time = task->creation_time; | |||
| 787 | task_unlock(task); | |||
| 788 | ||||
| 789 | if (*task_info_count > TASK_BASIC_INFO_COUNT) | |||
| 790 | *task_info_count = TASK_BASIC_INFO_COUNT; | |||
| 791 | break; | |||
| 792 | } | |||
| 793 | ||||
| 794 | case TASK_EVENTS_INFO: | |||
| 795 | { | |||
| 796 | task_events_info_t event_info; | |||
| 797 | ||||
| 798 | if (*task_info_count < TASK_EVENTS_INFO_COUNT) { | |||
| 799 | return KERN_INVALID_ARGUMENT; | |||
| 800 | } | |||
| 801 | ||||
| 802 | event_info = (task_events_info_t) task_info_out; | |||
| 803 | ||||
| 804 | task_lock(task); | |||
| 805 | event_info->faults = task->faults; | |||
| 806 | event_info->zero_fills = task->zero_fills; | |||
| 807 | event_info->reactivations = task->reactivations; | |||
| 808 | event_info->pageins = task->pageins; | |||
| 809 | event_info->cow_faults = task->cow_faults; | |||
| 810 | event_info->messages_sent = task->messages_sent; | |||
| 811 | event_info->messages_received = task->messages_received; | |||
| 812 | task_unlock(task); | |||
| 813 | ||||
| 814 | *task_info_count = TASK_EVENTS_INFO_COUNT; | |||
| 815 | break; | |||
| 816 | } | |||
| 817 | ||||
| 818 | case TASK_THREAD_TIMES_INFO: | |||
| 819 | { | |||
| 820 | task_thread_times_info_t times_info; | |||
| 821 | thread_t thread; | |||
| 822 | ||||
| 823 | if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) { | |||
| 824 | return KERN_INVALID_ARGUMENT; | |||
| 825 | } | |||
| 826 | ||||
| 827 | times_info = (task_thread_times_info_t) task_info_out; | |||
| 828 | times_info->user_time.seconds = 0; | |||
| 829 | times_info->user_time.microseconds = 0; | |||
| 830 | times_info->system_time.seconds = 0; | |||
| 831 | times_info->system_time.microseconds = 0; | |||
| 832 | ||||
| 833 | task_lock(task); | |||
| 834 | queue_iterate(&task->thread_list, thread, | |||
| 835 | thread_t, thread_list) | |||
| 836 | { | |||
| 837 | time_value_t user_time, system_time; | |||
| 838 | spl_t s; | |||
| 839 | ||||
| 840 | s = splsched(); | |||
| 841 | thread_lock(thread); | |||
| 842 | ||||
| 843 | thread_read_times(thread, &user_time, &system_time); | |||
| 844 | ||||
| 845 | thread_unlock(thread); | |||
| 846 | splx(s); | |||
| 847 | ||||
| 848 | time_value_add(&times_info->user_time, &user_time); | |||
| 849 | time_value_add(&times_info->system_time, &system_time); | |||
| 850 | } | |||
| 851 | task_unlock(task); | |||
| 852 | ||||
| 853 | *task_info_count = TASK_THREAD_TIMES_INFO_COUNT; | |||
| 854 | break; | |||
| 855 | } | |||
| 856 | ||||
| 857 | default: | |||
| 858 | return KERN_INVALID_ARGUMENT; | |||
| 859 | } | |||
| 860 | ||||
| 861 | return KERN_SUCCESS; | |||
| 862 | } | |||
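A user-level query passes the count in and reads it back out; as the comment in the `TASK_BASIC_INFO` case notes, the kernel tolerates a count up to two `natural_t` short for callers that predate `creation_time`. A hedged sketch:

```c
/* Hedged sketch: fetch TASK_BASIC_INFO for a task port we hold. */
#include <mach.h>
#include <mach/task_info.h>
#include <stdio.h>

void show_basic_info(task_t task)
{
	struct task_basic_info info;
	natural_t count = TASK_BASIC_INFO_COUNT;	/* in/out */

	if (task_info(task, TASK_BASIC_INFO,
		      (task_info_t) &info, &count) != KERN_SUCCESS)
		return;

	printf("virtual %lu bytes, resident %lu bytes, base pri %d\n",
	       (unsigned long) info.virtual_size,
	       (unsigned long) info.resident_size,
	       info.base_priority);
}
```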
| 863 | ||||
| 864 | #if MACH_HOST | |||
| 865 | /* | |||
| 866 | * task_assign: | |||
| 867 | * | |||
| 868 | * Change the assigned processor set for the task | |||
| 869 | */ | |||
| 870 | kern_return_t | |||
| 871 | task_assign( | |||
| 872 | task_t task, | |||
| 873 | processor_set_t new_pset, | |||
| 874 | boolean_t assign_threads) | |||
| 875 | { | |||
| 876 | kern_return_t ret = KERN_SUCCESS; | |||
| 877 | thread_t thread, prev_thread; | |||
| 878 | queue_head_t *list; | |||
| 879 | processor_set_t pset; | |||
| 880 | ||||
| 881 | if (task == TASK_NULL || new_pset == PROCESSOR_SET_NULL) { | |||
| 882 | return KERN_INVALID_ARGUMENT; | |||
| 883 | } | |||
| 884 | ||||
| 885 | /* | |||
| 886 | * Freeze task`s assignment. Prelude to assigning | |||
| 887 | * task. Only one freeze may be held per task. | |||
| 888 | */ | |||
| 889 | ||||
| 890 | task_lock(task); | |||
| 891 | while (task->may_assign == FALSE) { | |||
| 892 | task->assign_active = TRUE; | |||
| 893 | assert_wait((event_t)&task->assign_active, TRUE); | |||
| 894 | task_unlock(task); | |||
| 895 | thread_block((void (*)()) 0); | |||
| 896 | task_lock(task); | |||
| 897 | } | |||
| 898 | ||||
| 899 | /* | |||
| 900 | * Avoid work if task already in this processor set. | |||
| 901 | */ | |||
| 902 | if (task->processor_set == new_pset) { | |||
| 903 | /* | |||
| 904 | * No need for task->assign_active wakeup: | |||
| 905 | * task->may_assign is still TRUE. | |||
| 906 | */ | |||
| 907 | task_unlock(task); | |||
| 908 | return KERN_SUCCESS; | |||
| 909 | } | |||
| 910 | ||||
| 911 | task->may_assign = FALSE; | |||
| 912 | task_unlock(task); | |||
| 913 | ||||
| 914 | /* | |||
| 915 | * Safe to get the task`s pset: it cannot change while | |||
| 916 | * task is frozen. | |||
| 917 | */ | |||
| 918 | pset = task->processor_set; | |||
| 919 | ||||
| 920 | /* | |||
| 921 | * Lock both psets now. Use ordering to avoid deadlock. | |||
| 922 | */ | |||
| 923 | Restart: | |||
| 924 | if ((vm_offset_t) pset < (vm_offset_t) new_pset) { | |||
| 925 | pset_lock(pset); | |||
| 926 | pset_lock(new_pset); | |||
| 927 | } | |||
| 928 | else { | |||
| 929 | pset_lock(new_pset); | |||
| 930 | pset_lock(pset); | |||
| 931 | } | |||
| 932 | ||||
| 933 | /* | |||
| 934 | * Check if new_pset is ok to assign to. If not, | |||
| 935 | * reassign to default_pset. | |||
| 936 | */ | |||
| 937 | if (!new_pset->active) { | |||
| 938 | pset_unlock(pset); | |||
| 939 | pset_unlock(new_pset); | |||
| 940 | new_pset = &default_pset; | |||
| 941 | goto Restart; | |||
| 942 | } | |||
| 943 | ||||
| 944 | pset_reference(new_pset); | |||
| 945 | ||||
| 946 | /* | |||
| 947 | * Now grab the task lock and move the task. | |||
| 948 | */ | |||
| 949 | ||||
| 950 | task_lock(task); | |||
| 951 | pset_remove_task(pset, task); | |||
| 952 | pset_add_task(new_pset, task); | |||
| 953 | ||||
| 954 | pset_unlock(pset); | |||
| 955 | pset_unlock(new_pset); | |||
| 956 | ||||
| 957 | if (assign_threads == FALSE) { | |||
| 958 | /* | |||
| 959 | * We leave existing threads at their | |||
| 960 | * old assignments. Unfreeze task`s | |||
| 961 | * assignment. | |||
| 962 | */ | |||
| 963 | task->may_assign = TRUE; | |||
| 964 | if (task->assign_active) { | |||
| 965 | task->assign_active = FALSE; | |||
| 966 | thread_wakeup((event_t) &task->assign_active); | |||
| 967 | } | |||
| 968 | task_unlock(task); | |||
| 969 | pset_deallocate(pset); | |||
| 970 | return KERN_SUCCESS; | |||
| 971 | } | |||
| 972 | ||||
| 973 | /* | |||
| 974 | * If current thread is in task, freeze its assignment. | |||
| 975 | */ | |||
| 976 | if (current_thread()->task == task) { | |||
| 977 | task_unlock(task); | |||
| 978 | thread_freeze(current_thread()); | |||
| 979 | task_lock(task); | |||
| 980 | } | |||
| 981 | ||||
| 982 | /* | |||
| 983 | * Iterate down the thread list reassigning all the threads. | |||
| 984 | * New threads pick up task's new processor set automatically. | |||
| 985 | * Do current thread last because new pset may be empty. | |||
| 986 | */ | |||
| 987 | list = &task->thread_list; | |||
| 988 | prev_thread = THREAD_NULL; | |||
| 989 | queue_iterate(list, thread, thread_t, thread_list) { | |||
| 990 | if (!(task->active)) { | |||
| 991 | ret = KERN_FAILURE; | |||
| 992 | break; | |||
| 993 | } | |||
| 994 | if (thread != current_thread()) { | |||
| 995 | thread_reference(thread); | |||
| 996 | task_unlock(task); | |||
| 997 | if (prev_thread != THREAD_NULL) | |||
| 998 | thread_deallocate(prev_thread); /* may block */ | |||
| 999 | thread_assign(thread,new_pset); /* may block */ | |||
| 1000 | prev_thread = thread; | |||
| 1001 | task_lock(task); | |||
| 1002 | } | |||
| 1003 | } | |||
| 1004 | ||||
| 1005 | /* | |||
| 1006 | * Done, wakeup anyone waiting for us. | |||
| 1007 | */ | |||
| 1008 | task->may_assign = TRUE; | |||
| 1009 | if (task->assign_active) { | |||
| 1010 | task->assign_active = FALSE; | |||
| 1011 | thread_wakeup((event_t)&task->assign_active); | |||
| 1012 | } | |||
| 1013 | task_unlock(task); | |||
| 1014 | if (prev_thread != THREAD_NULL) | |||
| 1015 | thread_deallocate(prev_thread); /* may block */ | |||
| 1016 | ||||
| 1017 | /* | |||
| 1018 | * Finish assignment of current thread. | |||
| 1019 | */ | |||
| 1020 | if (current_thread()->task == task) | |||
| 1021 | thread_doassign(current_thread(), new_pset, TRUE); | |||
| 1022 | ||||
| 1023 | pset_deallocate(pset); | |||
| 1024 | ||||
| 1025 | return ret; | |||
| 1026 | } | |||
| 1027 | #else /* MACH_HOST */ | |||
| 1028 | /* | |||
| 1029 | * task_assign: | |||
| 1030 | * | |||
| 1031 | * Change the assigned processor set for the task | |||
| 1032 | */ | |||
| 1033 | kern_return_t | |||
| 1034 | task_assign( | |||
| 1035 | task_t task, | |||
| 1036 | processor_set_t new_pset, | |||
| 1037 | boolean_t assign_threads) | |||
| 1038 | { | |||
| 1039 | return KERN_FAILURE; | |||
| 1040 | } | |||
| 1041 | #endif /* MACH_HOST */ | |||
| 1042 | ||||
| 1043 | ||||
| 1044 | /* | |||
| 1045 | * task_assign_default: | |||
| 1046 | * | |||
| 1047 | * Version of task_assign to assign to default processor set. | |||
| 1048 | */ | |||
| 1049 | kern_return_t | |||
| 1050 | task_assign_default( | |||
| 1051 | task_t task, | |||
| 1052 | boolean_t assign_threads) | |||
| 1053 | { | |||
| 1054 | return task_assign(task, &default_pset, assign_threads); | |||
| 1055 | } | |||
| 1056 | ||||
| 1057 | /* | |||
| 1058 | * task_get_assignment | |||
| 1059 | * | |||
| 1060 | * Return name of processor set that task is assigned to. | |||
| 1061 | */ | |||
| 1062 | kern_return_t task_get_assignment( | |||
| 1063 | task_t task, | |||
| 1064 | processor_set_t *pset) | |||
| 1065 | { | |||
| 1066 | if (task == TASK_NULL) | |||
| 1067 | return KERN_INVALID_ARGUMENT; | |||
| 1068 | ||||
| 1069 | if (!task->active) | |||
| 1070 | return KERN_FAILURE; | |||
| 1071 | ||||
| 1072 | *pset = task->processor_set; | |||
| 1073 | pset_reference(*pset); | |||
| 1074 | return KERN_SUCCESS; | |||
| 1075 | } | |||
| 1076 | ||||
| 1077 | /* | |||
| 1078 | * task_priority | |||
| 1079 | * | |||
| 1080 | * Set priority of task; used only for newly created threads. | |||
| 1081 | * Optionally change priorities of threads. | |||
| 1082 | */ | |||
| 1083 | kern_return_t | |||
| 1084 | task_priority( | |||
| 1085 | task_t task, | |||
| 1086 | int priority, | |||
| 1087 | boolean_t change_threads) | |||
| 1088 | { | |||
| 1089 | kern_return_t ret = KERN_SUCCESS; | |||
| 1090 | ||||
| 1091 | if (task == TASK_NULL || invalid_pri(priority)) | |||
| 1092 | return KERN_INVALID_ARGUMENT; | |||
| 1093 | ||||
| 1094 | task_lock(task); | |||
| 1095 | task->priority = priority; | |||
| 1096 | ||||
| 1097 | if (change_threads) { | |||
| 1098 | thread_t thread; | |||
| 1099 | queue_head_t *list; | |||
| 1100 | ||||
| 1101 | list = &task->thread_list; | |||
| 1102 | queue_iterate(list, thread, thread_t, thread_list) { | |||
| 1103 | if (thread_priority(thread, priority, FALSE) | |||
| 1104 | != KERN_SUCCESS) | |||
| 1105 | ret = KERN_FAILURE; | |||
| 1106 | } | |||
| 1107 | } | |||
| 1108 | ||||
| 1109 | task_unlock(task); | |||
| 1110 | return ret; | |||
| 1111 | } | |||
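From the macro expansions in the report, `invalid_pri()` rejects anything outside 0..49 and `BASEPRI_USER` is 25 (lower values are more favored). A hedged user-level fragment that deprioritizes a task and its existing threads in one call:

```c
/* Hedged fragment: 30 is less favored than the BASEPRI_USER (25)
 * default.  TRUE also walks the existing threads, as in the loop
 * above; newly created threads inherit task->priority automatically. */
kern_return_t kr = task_priority(child, 30, TRUE);
if (kr != KERN_SUCCESS)
	/* out of range, or some thread_priority() call failed */
	fprintf(stderr, "task_priority: %d\n", kr);
```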
| 1112 | ||||
| 1113 | /* | |||
| 1114 | * task_set_name | |||
| 1115 | * | |||
| 1116 | * Set the name of task TASK to NAME. This is a debugging aid. | |||
| 1117 | * NAME will be used in error messages printed by the kernel. | |||
| 1118 | */ | |||
| 1119 | kern_return_t | |||
| 1120 | task_set_name( | |||
| 1121 | task_t task, | |||
| 1122 | kernel_debug_name_t name) | |||
| 1123 | { | |||
| 1124 | strncpy(task->name, name, sizeof task->name - 1); | |||
| 1125 | task->name[sizeof task->name - 1] = '\0'; | |||
| 1126 | return KERN_SUCCESS; | |||
| 1127 | } | |||
| 1128 | ||||
| 1129 | /* | |||
| 1130 | * task_collect_scan: | |||
| 1131 | * | |||
| 1132 | * Attempt to free resources owned by tasks. | |||
| 1133 | */ | |||
| 1134 | ||||
| 1135 | void task_collect_scan(void) | |||
| 1136 | { | |||
| 1137 | task_t task, prev_task; | |||
| 1138 | processor_set_t pset, prev_pset; | |||
| 1139 | ||||
| 1140 | prev_task = TASK_NULL; | |||
| 1141 | prev_pset = PROCESSOR_SET_NULL; | |||
| 1142 | ||||
| 1143 | simple_lock(&all_psets_lock); | |||
| 1144 | queue_iterate(&all_psets, pset, processor_set_t, all_psets) { | |||
| 1145 | pset_lock(pset); | |||
| 1146 | queue_iterate(&pset->tasks, task, task_t, pset_tasks) { | |||
| 1147 | task_reference(task); | |||
| 1148 | pset_reference(pset); | |||
| 1149 | pset_unlock(pset); | |||
| 1150 | simple_unlock(&all_psets_lock); | |||
| 1151 | ||||
| 1152 | machine_task_collect (task); | |||
| 1153 | pmap_collect(task->map->pmap); | |||
| 1154 | ||||
| 1155 | if (prev_task != TASK_NULL) | |||
| 1156 | task_deallocate(prev_task); | |||
| 1157 | prev_task = task; | |||
| 1158 | ||||
| 1159 | if (prev_pset != PROCESSOR_SET_NULL) | |||
| 1160 | pset_deallocate(prev_pset); | |||
| 1161 | prev_pset = pset; | |||
| 1162 | ||||
| 1163 | simple_lock(&all_psets_lock); | |||
| 1164 | pset_lock(pset); | |||
| 1165 | } | |||
| 1166 | pset_unlock(pset); | |||
| 1167 | } | |||
| 1168 | simple_unlock(&all_psets_lock); | |||
| 1169 | ||||
| 1170 | if (prev_task != TASK_NULL) | |||
| 1171 | task_deallocate(prev_task); | |||
| 1172 | if (prev_pset != PROCESSOR_SET_NULL) | |||
| 1173 | pset_deallocate(prev_pset); | |||
| 1174 | } | |||
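This loop contains the line the report flags (source line 1153, `pmap_collect(task->map->pmap)`). The analyzer's path comes from `task_reference()` a few lines earlier: because that function tolerates `task == TASK_NULL`, the analyzer assumes `task` may be null and then sees it dereferenced, even though a task on a pset's task list is never null. A purely illustrative sketch of making that invariant explicit (not part of the scanned source):

```c
/* Hypothetical annotation: assert the list invariant so the analyzer
 * cannot carry a null 'task' from task_reference()'s internal NULL
 * check into the dereference at line 1153. */
queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
	assert(task != TASK_NULL);
	task_reference(task);
	/* ... body unchanged; task->map->pmap is now provably non-null ... */
}
```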
| 1175 | ||||
| 1176 | boolean_t task_collect_allowed = TRUE; | |||
| 1177 | unsigned task_collect_last_tick = 0; | |||
| 1178 | unsigned task_collect_max_rate = 0; /* in ticks */ | |||
| 1179 | ||||
| 1180 | /* | |||
| 1181 | * consider_task_collect: | |||
| 1182 | * | |||
| 1183 | * Called by the pageout daemon when the system needs more free pages. | |||
| 1184 | */ | |||
| 1185 | ||||
| 1186 | void consider_task_collect(void) | |||
| 1187 | { | |||
| 1188 | /* | |||
| 1189 | * By default, don't attempt task collection more frequently | |||
| 1190 | * than once a second. | |||
| 1191 | */ | |||
| 1192 | ||||
| 1193 | if (task_collect_max_rate == 0) | |||
| 1194 | task_collect_max_rate = hz; | |||
| 1195 | ||||
| 1196 | if (task_collect_allowed && | |||
| 1197 | (sched_tick > (task_collect_last_tick + task_collect_max_rate))) { | |||
| 1198 | task_collect_last_tick = sched_tick; | |||
| 1199 | task_collect_scan(); | |||
| 1200 | } | |||
| 1201 | } | |||
| 1202 | ||||
| 1203 | kern_return_t | |||
| 1204 | task_ras_control( | |||
| 1205 | task_t task, | |||
| 1206 | vm_offset_t pc, | |||
| 1207 | vm_offset_t endpc, | |||
| 1208 | int flavor) | |||
| 1209 | { | |||
| 1210 | kern_return_t ret = KERN_FAILURE; | |||
| 1211 | ||||
| 1212 | #if FAST_TAS | |||
| 1213 | int i; | |||
| 1214 | ||||
| 1215 | ret = KERN_SUCCESS; | |||
| 1216 | task_lock(task); | |||
| 1217 | switch (flavor) { | |||
| 1218 | case TASK_RAS_CONTROL_PURGE_ALL: /* remove all RAS */ | |||
| 1219 | for (i = 0; i < TASK_FAST_TAS_NRAS; i++) { | |||
| 1220 | task->fast_tas_base[i] = task->fast_tas_end[i] = 0; | |||
| 1221 | } | |||
| 1222 | break; | |||
| 1223 | case TASK_RAS_CONTROL_PURGE_ONE: /* remove this RAS, collapse remaining */ | |||
| 1224 | for (i = 0; i < TASK_FAST_TAS_NRAS; i++) { | |||
| 1225 | if ( (task->fast_tas_base[i] == pc) | |||
| 1226 | && (task->fast_tas_end[i] == endpc)) { | |||
| 1227 | while (i < TASK_FAST_TAS_NRAS-1) { | |||
| 1228 | task->fast_tas_base[i] = task->fast_tas_base[i+1]; | |||
| 1229 | task->fast_tas_end[i] = task->fast_tas_end[i+1]; | |||
| 1230 | i++; | |||
| 1231 | } | |||
| 1232 | task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0; | |||
| 1233 | task->fast_tas_end[TASK_FAST_TAS_NRAS-1] = 0; | |||
| 1234 | break; | |||
| 1235 | } | |||
| 1236 | } | |||
| 1237 | if (i == TASK_FAST_TAS_NRAS) { | |||
| 1238 | ret = KERN_INVALID_ADDRESS; | |||
| 1239 | } | |||
| 1240 | break; | |||
| 1241 | case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE: | |||
| 1242 | /* remove all RAS and install this RAS */ | |||
| 1243 | for (i = 0; i < TASK_FAST_TAS_NRAS; i++) { | |||
| 1244 | task->fast_tas_base[i] = task->fast_tas_end[i] = 0; | |||
| 1245 | } | |||
| 1246 | /* FALL THROUGH */ | |||
| 1247 | case TASK_RAS_CONTROL_INSTALL_ONE: /* install this RAS */ | |||
| 1248 | for (i = 0; i < TASK_FAST_TAS_NRAS; i++) { | |||
| 1249 | if ( (task->fast_tas_base[i] == pc) | |||
| 1250 | && (task->fast_tas_end[i] == endpc)) { | |||
| 1251 | /* already installed */ | |||
| 1252 | break; | |||
| 1253 | } | |||
| 1254 | if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)){ | |||
| 1255 | task->fast_tas_base[i] = pc; | |||
| 1256 | task->fast_tas_end[i] = endpc; | |||
| 1257 | break; | |||
| 1258 | } | |||
| 1259 | } | |||
| 1260 | if (i == TASK_FAST_TAS_NRAS) { | |||
| 1261 | ret = KERN_RESOURCE_SHORTAGE; | |||
| 1262 | } | |||
| 1263 | break; | |||
| 1264 | default: ret = KERN_INVALID_VALUE; | |||
| 1265 | break; | |||
| 1266 | } | |||
| 1267 | task_unlock(task); | |||
| 1268 | #endif /* FAST_TAS */ | |||
| 1269 | return ret; | |||
| 1270 | } | |||
| 1271 | ||||
| 1272 | /* | |||
| 1273 | * register_new_task_notification | |||
| 1274 | * | |||
| 1275 | * Register a port to which notifications about newly created | |||
| 1276 | * tasks are sent. | |||
| 1277 | */ | |||
| 1278 | kern_return_t | |||
| 1279 | register_new_task_notification( | |||
| 1280 | const host_t host, | |||
| 1281 | ipc_port_t notification) | |||
| 1282 | { | |||
| 1283 | if (host == HOST_NULL) | |||
| 1284 | return KERN_INVALID_HOST; | |||
| 1285 | ||||
| 1286 | if (new_task_notification != NULL) | |||
| 1287 | return KERN_NO_ACCESS; | |||
| 1288 | ||||
| 1289 | new_task_notification = notification; | |||
| 1290 | return KERN_SUCCESS; | |||
| 1291 | } |
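Registration requires the host port and there is no deregistration path, so a second caller gets `KERN_NO_ACCESS`. A hedged user-level sketch (assumes a privileged caller, e.g. the Hurd's proc server, and that the generated `register_new_task_notification` stub is available; the port-right handling is an assumption):

```c
#include <mach.h>

/* Hedged sketch: allocate a receive port and register it for
 * new-task notifications. */
kern_return_t watch_new_tasks(mach_port_t host_priv)
{
	mach_port_t port;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS)
		return kr;

	/* give ourselves a send right to pass to the kernel */
	kr = mach_port_insert_right(mach_task_self(), port, port,
				    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS)
		return kr;

	/* fails with KERN_NO_ACCESS if a port is already registered */
	return register_new_task_notification(host_priv, port);
}
```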