Bug Summary

File:        obj-scan-build/../kern/processor.c
Location:    line 922, column 4
Description: Array access (from variable 'threads') results in a null pointer dereference

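The defect path, read from the → [n] event notes embedded in the listing below: processor_set_things() is entered from processor_set_threads() [1] with a non-null pset [2]; size and addr both start at 0 [4]; the sizing loop exits on its first pass because size_needed <= size [8-10], which with size == 0 means size_needed == 0, so no buffer is ever allocated; control jumps to the THING_THREAD case [11], where threads takes the still-null addr [12]; the copy loop is nevertheless entered on the assumption i < actual [13-14], and the store "threads[i] = thread" at line 922 writes through the null pointer [15]. Note that the two assumptions coexist only if actual > 0 while actual * sizeof(mach_port_t) evaluates to 0, i.e. only if the multiplication wraps (plausible only with very large actual and a 32-bit vm_size_t); on any non-overflowing input, the loop breaks with addr == 0 only when actual == 0, and the copy loop body is never reached.

A minimal standalone sketch of the flagged pattern (hypothetical names, plain C with malloc standing in for kalloc; not the kernel code itself):

    #include <stddef.h>
    #include <stdlib.h>

    /*
     * Distillation of the processor_set_things() shape: a sizing loop
     * that can break with buf still NULL (when size_needed == 0),
     * followed by a copy loop guarded only by 'actual'.  The analyzer's
     * assumptions "size_needed <= size" (with size == 0) and
     * "i < actual" coexist only if the multiplication wraps to 0 while
     * actual > 0, e.g. actual >= 0x40000000 with a 32-bit size_t.
     */
    static void copy_things(unsigned int actual, void *thing)
    {
        size_t size = 0, size_needed;
        void **buf = NULL;              /* mirrors 'addr = 0' */

        for (;;) {
            size_needed = actual * sizeof(void *);
            if (size_needed <= size)    /* first pass: size == 0 */
                break;                  /* may leave buf == NULL */
            size = size_needed;
            buf = malloc(size);         /* stands in for kalloc() */
            if (buf == NULL)
                return;
        }

        for (unsigned int i = 0; i < actual; i++)
            buf[i] = thing;             /* the line-922 analogue */
    }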
Annotated Source Code

1 /*
2  * Mach Operating System
3  * Copyright (c) 1993-1988 Carnegie Mellon University
4  * All Rights Reserved.
5  *
6  * Permission to use, copy, modify and distribute this software and its
7  * documentation is hereby granted, provided that both the copyright
8  * notice and this permission notice appear in all copies of the
9  * software, derivative works or modified versions, and any portions
10  * thereof, and that both notices appear in supporting documentation.
11  *
12  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15  *
16  * Carnegie Mellon requests users of this software to return to
17  *
18  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
19  *  School of Computer Science
20  *  Carnegie Mellon University
21  *  Pittsburgh PA 15213-3890
22  *
23  * any improvements or extensions that they make and grant Carnegie Mellon
24  * the rights to redistribute these changes.
25  */
26 /*
27  * processor.c: processor and processor_set manipulation routines.
28  */
29
30 #include <string.h>
31 
32 #include <mach/boolean.h>
33 #include <mach/policy.h>
34 #include <mach/processor_info.h>
35 #include <mach/vm_param.h>
36 #include <kern/cpu_number.h>
37 #include <kern/debug.h>
38 #include <kern/kalloc.h>
39 #include <kern/lock.h>
40 #include <kern/host.h>
41 #include <kern/ipc_tt.h>
42 #include <kern/processor.h>
43 #include <kern/sched.h>
44 #include <kern/task.h>
45 #include <kern/thread.h>
46 #include <kern/ipc_host.h>
47 #include <ipc/ipc_port.h>
48 
49 #if MACH_HOST
50 #include <kern/slab.h>
51 struct kmem_cache pset_cache;
52 #endif /* MACH_HOST */
53
54
55 /*
56  * Exported variables.
57  */
58 struct processor_set default_pset;
59 struct processor processor_array[NCPUS];
60 
61 queue_head_t all_psets;
62 int all_psets_count;
63 decl_simple_lock_data(, all_psets_lock);
64 
65 processor_t master_processor;
66 processor_t processor_ptr[NCPUS];
67
68 /*
69  * Bootstrap the processor/pset system so the scheduler can run.
70  */
71 void pset_sys_bootstrap(void)
72 {
73     int i;
74 
75     pset_init(&default_pset);
76     default_pset.empty = FALSE;
77     for (i = 0; i < NCPUS; i++) {
78         /*
79          * Initialize processor data structures.
80          * Note that cpu_to_processor(i) is processor_ptr[i].
81          */
82         processor_ptr[i] = &processor_array[i];
83         processor_init(processor_ptr[i], i);
84     }
85     master_processor = cpu_to_processor(master_cpu);
86     queue_init(&all_psets);
87     simple_lock_init(&all_psets_lock);
88     queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
89     all_psets_count = 1;
90     default_pset.active = TRUE;
91     default_pset.empty = FALSE;
92 
93     /*
94      * Note: the default_pset has a max_priority of BASEPRI_USER.
95      * Internal kernel threads override this in kernel_thread.
96      */
97 }
98
99 #if MACH_HOST
100 /*
101  * Rest of pset system initializations.
102  */
103 void pset_sys_init(void)
104 {
105     int i;
106     processor_t processor;
107 
108     /*
109      * Allocate the cache for processor sets.
110      */
111     kmem_cache_init(&pset_cache, "processor_set",
112             sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
113 
114     /*
115      * Give each processor a control port.
116      * The master processor already has one.
117      */
118     for (i = 0; i < NCPUS; i++) {
119         processor = cpu_to_processor(i);
120         if (processor != master_processor &&
121             machine_slot[i].is_cpu)
122         {
123             ipc_processor_init(processor);
124         }
125     }
126 }
127 #endif /* MACH_HOST */
128
129 /*
130  * Initialize the given processor_set structure.
131  */
132 
133 void pset_init(
134     processor_set_t pset)
135 {
136     int i;
137 
138     simple_lock_init(&pset->runq.lock);
139     pset->runq.low = 0;
140     pset->runq.count = 0;
141     for (i = 0; i < NRQS; i++) {
142         queue_init(&(pset->runq.runq[i]));
143     }
144     queue_init(&pset->idle_queue);
145     pset->idle_count = 0;
146     simple_lock_init(&pset->idle_lock);
147     queue_init(&pset->processors);
148     pset->processor_count = 0;
149     pset->empty = TRUE;
150     queue_init(&pset->tasks);
151     pset->task_count = 0;
152     queue_init(&pset->threads);
153     pset->thread_count = 0;
154     pset->ref_count = 1;
155     simple_lock_init(&pset->ref_lock);
156     queue_init(&pset->all_psets);
157     pset->active = FALSE;
158     simple_lock_init(&pset->lock);
159     pset->pset_self = IP_NULL;
160     pset->pset_name_self = IP_NULL;
161     pset->max_priority = BASEPRI_USER;
162 #if MACH_FIXPRI
163     pset->policies = POLICY_TIMESHARE;
164 #endif /* MACH_FIXPRI */
165     pset->set_quantum = min_quantum;
166 #if NCPUS > 1
167     pset->quantum_adj_index = 0;
168     simple_lock_init(&pset->quantum_adj_lock);
169 
170     for (i = 0; i <= NCPUS; i++) {
171         pset->machine_quantum[i] = min_quantum;
172     }
173 #endif /* NCPUS > 1 */
174     pset->mach_factor = 0;
175     pset->load_average = 0;
176     pset->sched_load = SCHED_SCALE;    /* i.e. 1 */
177 }
178
179 /*
180  * Initialize the given processor structure for the processor in
181  * the slot specified by slot_num.
182  */
183 
184 void processor_init(
185     processor_t pr,
186     int slot_num)
187 {
188     int i;
189 
190     simple_lock_init(&pr->runq.lock);
191     pr->runq.low = 0;
192     pr->runq.count = 0;
193     for (i = 0; i < NRQS; i++) {
194         queue_init(&(pr->runq.runq[i]));
195     }
196     queue_init(&pr->processor_queue);
197     pr->state = PROCESSOR_OFF_LINE;
198     pr->next_thread = THREAD_NULL;
199     pr->idle_thread = THREAD_NULL;
200     pr->quantum = 0;
201     pr->first_quantum = FALSE;
202     pr->last_quantum = 0;
203     pr->processor_set = PROCESSOR_SET_NULL;
204     pr->processor_set_next = PROCESSOR_SET_NULL;
205     queue_init(&pr->processors);
206     simple_lock_init(&pr->lock);
207     pr->processor_self = IP_NULL;
208     pr->slot_num = slot_num;
209 }
210
211 /*
212  * pset_remove_processor() removes a processor from a processor_set.
213  * It can only be called on the current processor.  Caller must
214  * hold lock on current processor and processor set.
215  */
216 
217 void pset_remove_processor(
218     processor_set_t pset,
219     processor_t processor)
220 {
221     if (pset != processor->processor_set)
222         panic("pset_remove_processor: wrong pset");
223 
224     queue_remove(&pset->processors, processor, processor_t, processors);
225     processor->processor_set = PROCESSOR_SET_NULL;
226     pset->processor_count--;
227     quantum_set(pset);
228 }
229
230 /*
231  * pset_add_processor() adds a processor to a processor_set.
232  * It can only be called on the current processor.  Caller must
233  * hold lock on current processor and on pset.  No reference counting on
234  * processors.  Processor reference to pset is implicit.
235  */
236 
237 void pset_add_processor(
238     processor_set_t pset,
239     processor_t processor)
240 {
241     queue_enter(&pset->processors, processor, processor_t, processors);
242     processor->processor_set = pset;
243     pset->processor_count++;
244     quantum_set(pset);
245 }
246
247 /*
248  * pset_remove_task() removes a task from a processor_set.
249  * Caller must hold locks on pset and task.  Pset reference count
250  * is not decremented; caller must explicitly pset_deallocate.
251  */
252 
253 void pset_remove_task(
254     processor_set_t pset,
255     task_t task)
256 {
257     if (pset != task->processor_set)
258         return;
259 
260     queue_remove(&pset->tasks, task, task_t, pset_tasks);
261     task->processor_set = PROCESSOR_SET_NULL;
262     pset->task_count--;
263 }
264
265 /*
266  * pset_add_task() adds a task to a processor_set.
267  * Caller must hold locks on pset and task.  Pset references to
268  * tasks are implicit.
269  */
270 
271 void pset_add_task(
272     processor_set_t pset,
273     task_t task)
274 {
275     queue_enter(&pset->tasks, task, task_t, pset_tasks);
276     task->processor_set = pset;
277     pset->task_count++;
278 }
279
280 /*
281  * pset_remove_thread() removes a thread from a processor_set.
282  * Caller must hold locks on pset and thread.  Pset reference count
283  * is not decremented; caller must explicitly pset_deallocate.
284  */
285 
286 void pset_remove_thread(
287     processor_set_t pset,
288     thread_t thread)
289 {
290     queue_remove(&pset->threads, thread, thread_t, pset_threads);
291     thread->processor_set = PROCESSOR_SET_NULL;
292     pset->thread_count--;
293 }
294
295 /*
296  * pset_add_thread() adds a thread to a processor_set.
297  * Caller must hold locks on pset and thread.  Pset references to
298  * threads are implicit.
299  */
300 
301 void pset_add_thread(
302     processor_set_t pset,
303     thread_t thread)
304 {
305     queue_enter(&pset->threads, thread, thread_t, pset_threads);
306     thread->processor_set = pset;
307     pset->thread_count++;
308 }
309
310 /*
311  * thread_change_psets() changes the pset of a thread.  Caller must
312  * hold locks on both psets and thread.  The old pset must be
313  * explicitly pset_deallocate()'d by caller.
314  */
315 
316 void thread_change_psets(
317     thread_t thread,
318     processor_set_t old_pset,
319     processor_set_t new_pset)
320 {
321     queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
322     old_pset->thread_count--;
323     queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
324     thread->processor_set = new_pset;
325     new_pset->thread_count++;
326 }
327
328 /*
329  * pset_deallocate:
330  *
331  * Remove one reference to the processor set.  Destroy processor_set
332  * if this was the last reference.
333  */
334 void pset_deallocate(
335     processor_set_t pset)
336 {
337     if (pset == PROCESSOR_SET_NULL)
338         return;
339 
340     pset_ref_lock(pset);
341     if (--pset->ref_count > 0) {
342         pset_ref_unlock(pset);
343         return;
344     }
345 #if !MACH_HOST
346     panic("pset_deallocate: default_pset destroyed");
347 #endif /* !MACH_HOST */
348 
349 #if MACH_HOST
350     /*
351      * Reference count is zero, however the all_psets list
352      * holds an implicit reference and may make new ones.
353      * Its lock also dominates the pset lock.  To check for this,
354      * temporarily restore one reference, and then lock the
355      * other structures in the right order.
356      */
357     pset->ref_count = 1;
358     pset_ref_unlock(pset);
359 
360     simple_lock(&all_psets_lock);
361     pset_ref_lock(pset);
362     if (--pset->ref_count > 0) {
363         /*
364          * Made an extra reference.
365          */
366         pset_ref_unlock(pset);
367         simple_unlock(&all_psets_lock);
368         return;
369     }
370 
371     /*
372      * Ok to destroy pset.  Make a few paranoia checks.
373      */
374 
375     if ((pset == &default_pset) || (pset->thread_count > 0) ||
376         (pset->task_count > 0) || pset->processor_count > 0) {
377         panic("pset_deallocate: destroy default or active pset");
378     }
379     /*
380      * Remove from all_psets queue.
381      */
382     queue_remove(&all_psets, pset, processor_set_t, all_psets);
383     all_psets_count--;
384 
385     pset_ref_unlock(pset);
386     simple_unlock(&all_psets_lock);
387 
388     /*
389      * That's it, free data structure.
390      */
391     kmem_cache_free(&pset_cache, (vm_offset_t)pset);
392 #endif /* MACH_HOST */
393 }
394
395 /*
396  * pset_reference:
397  *
398  * Add one reference to the processor set.
399  */
400 void pset_reference(
401     processor_set_t pset)
402 {
403     pset_ref_lock(pset);
404     pset->ref_count++;
405     pset_ref_unlock(pset);
406 }
407
408 kern_return_t
409 processor_info(
410     processor_t processor,
411     int flavor,
412     host_t *host,
413     processor_info_t info,
414     natural_t *count)
415 {
416     int slot_num, state;
417     processor_basic_info_t basic_info;
418 
419     if (processor == PROCESSOR_NULL)
420         return KERN_INVALID_ARGUMENT;
421 
422     if (flavor != PROCESSOR_BASIC_INFO ||
423         *count < PROCESSOR_BASIC_INFO_COUNT)
424         return KERN_FAILURE;
425 
426     basic_info = (processor_basic_info_t) info;
427 
428     slot_num = processor->slot_num;
429     basic_info->cpu_type = machine_slot[slot_num].cpu_type;
430     basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
431     state = processor->state;
432     if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
433         basic_info->running = FALSE;
434     else
435         basic_info->running = TRUE;
436     basic_info->slot_num = slot_num;
437     if (processor == master_processor)
438         basic_info->is_master = TRUE;
439     else
440         basic_info->is_master = FALSE;
441 
442     *count = PROCESSOR_BASIC_INFO_COUNT;
443     *host = &realhost;
444     return KERN_SUCCESS;
445 }
446
447 kern_return_t processor_start(
448     processor_t processor)
449 {
450     if (processor == PROCESSOR_NULL)
451         return KERN_INVALID_ARGUMENT;
452 #if NCPUS > 1
453     return cpu_start(processor->slot_num);
454 #else /* NCPUS > 1 */
455     return KERN_FAILURE;
456 #endif /* NCPUS > 1 */
457 }
458
459 kern_return_t processor_exit(
460     processor_t processor)
461 {
462     if (processor == PROCESSOR_NULL)
463         return KERN_INVALID_ARGUMENT;
464 
465 #if NCPUS > 1
466     return processor_shutdown(processor);
467 #else /* NCPUS > 1 */
468     return KERN_FAILURE;
469 #endif /* NCPUS > 1 */
470 }
471
472 kern_return_t
473 processor_control(
474     processor_t processor,
475     processor_info_t info,
476     natural_t count)
477 {
478     if (processor == PROCESSOR_NULL)
479         return KERN_INVALID_ARGUMENT;
480 
481 #if NCPUS > 1
482     return cpu_control(processor->slot_num, (int *)info, count);
483 #else /* NCPUS > 1 */
484     return KERN_FAILURE;
485 #endif /* NCPUS > 1 */
486 }
487
488 /*
489  * Precalculate the appropriate system quanta based on load.  The
490  * index into machine_quantum is the number of threads on the
491  * processor set queue.  It is limited to the number of processors in
492  * the set.
493  */
494 
495 void quantum_set(
496     processor_set_t pset)
497 {
498 #if NCPUS > 1
499     int i, ncpus;
500 
501     ncpus = pset->processor_count;
502 
503     for (i = 1; i <= ncpus; i++) {
504         pset->machine_quantum[i] =
505             ((min_quantum * ncpus) + (i/2)) / i;
506     }
507     pset->machine_quantum[0] = 2 * pset->machine_quantum[1];
508 
509     i = ((pset->runq.count > pset->processor_count) ?
510         pset->processor_count : pset->runq.count);
511     pset->set_quantum = pset->machine_quantum[i];
512 #else /* NCPUS > 1 */
513     default_pset.set_quantum = min_quantum;
514 #endif /* NCPUS > 1 */
515 }
516
517 #if MACH_HOST
518 /*
519  * processor_set_create:
520  *
521  * Create and return a new processor set.
522  */
523 
524 kern_return_t
525 processor_set_create(
526     host_t host,
527     processor_set_t *new_set,
528     processor_set_t *new_name)
529 {
530     processor_set_t pset;
531 
532     if (host == HOST_NULL)
533         return KERN_INVALID_ARGUMENT;
534 
535     pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
536     pset_init(pset);
537     pset_reference(pset);    /* for new_set out argument */
538     pset_reference(pset);    /* for new_name out argument */
539     ipc_pset_init(pset);
540     pset->active = TRUE;
541 
542     simple_lock(&all_psets_lock);
543     queue_enter(&all_psets, pset, processor_set_t, all_psets);
544     all_psets_count++;
545     simple_unlock(&all_psets_lock);
546 
547     ipc_pset_enable(pset);
548 
549     *new_set = pset;
550     *new_name = pset;
551     return KERN_SUCCESS;
552 }
553
554 /*
555  * processor_set_destroy:
556  *
557  * destroy a processor set.  Any tasks, threads or processors
558  * currently assigned to it are reassigned to the default pset.
559  */
560 kern_return_t processor_set_destroy(
561     processor_set_t pset)
562 {
563     queue_entry_t elem;
564     queue_head_t *list;
565 
566     if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
567         return KERN_INVALID_ARGUMENT;
568 
569     /*
570      * Handle multiple termination race.  First one through sets
571      * active to FALSE and disables ipc access.
572      */
573     pset_lock(pset);
574     if (!(pset->active)) {
575         pset_unlock(pset);
576         return KERN_FAILURE;
577     }
578 
579     pset->active = FALSE;
580     ipc_pset_disable(pset);
581 
582 
583     /*
584      * Now reassign everything in this set to the default set.
585      */
586 
587     if (pset->task_count > 0) {
588         list = &pset->tasks;
589         while (!queue_empty(list)) {
590             elem = queue_first(list);
591             task_reference((task_t) elem);
592             pset_unlock(pset);
593             task_assign((task_t) elem, &default_pset, FALSE);
594             task_deallocate((task_t) elem);
595             pset_lock(pset);
596         }
597     }
598 
599     if (pset->thread_count > 0) {
600         list = &pset->threads;
601         while (!queue_empty(list)) {
602             elem = queue_first(list);
603             thread_reference((thread_t) elem);
604             pset_unlock(pset);
605             thread_assign((thread_t) elem, &default_pset);
606             thread_deallocate((thread_t) elem);
607             pset_lock(pset);
608         }
609     }
610 
611     if (pset->processor_count > 0) {
612         list = &pset->processors;
613         while (!queue_empty(list)) {
614             elem = queue_first(list);
615             pset_unlock(pset);
616             processor_assign((processor_t) elem, &default_pset, TRUE);
617             pset_lock(pset);
618         }
619     }
620 
621     pset_unlock(pset);
622 
623     /*
624      * Destroy ipc state.
625      */
626     ipc_pset_terminate(pset);
627 
628     /*
629      * Deallocate pset's reference to itself.
630      */
631     pset_deallocate(pset);
632     return KERN_SUCCESS;
633 }
634
635 #else /* MACH_HOST */
636 
637 kern_return_t
638 processor_set_create(
639     host_t host,
640     processor_set_t *new_set,
641     processor_set_t *new_name)
642 {
643     return KERN_FAILURE;
644 }
645 
646 kern_return_t processor_set_destroy(
647     processor_set_t pset)
648 {
649     return KERN_FAILURE;
650 }
651 
652 #endif /* MACH_HOST */
653
654 kern_return_t
655 processor_get_assignment(
656     processor_t processor,
657     processor_set_t *pset)
658 {
659     int state;
660     if (processor == PROCESSOR_NULL)
661         return KERN_INVALID_ARGUMENT;
662 
663     state = processor->state;
664     if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
665         return KERN_FAILURE;
666 
667     *pset = processor->processor_set;
668     pset_reference(*pset);
669     return KERN_SUCCESS;
670 }
671
672 kern_return_t
673 processor_set_info(
674     processor_set_t pset,
675     int flavor,
676     host_t *host,
677     processor_set_info_t info,
678     natural_t *count)
679 {
680     if (pset == PROCESSOR_SET_NULL)
681         return KERN_INVALID_ARGUMENT;
682 
683     if (flavor == PROCESSOR_SET_BASIC_INFO) {
684         processor_set_basic_info_t basic_info;
685 
686         if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
687             return KERN_FAILURE;
688 
689         basic_info = (processor_set_basic_info_t) info;
690 
691         pset_lock(pset);
692         basic_info->processor_count = pset->processor_count;
693         basic_info->task_count = pset->task_count;
694         basic_info->thread_count = pset->thread_count;
695         basic_info->mach_factor = pset->mach_factor;
696         basic_info->load_average = pset->load_average;
697         pset_unlock(pset);
698 
699         *count = PROCESSOR_SET_BASIC_INFO_COUNT;
700         *host = &realhost;
701         return KERN_SUCCESS;
702     }
703     else if (flavor == PROCESSOR_SET_SCHED_INFO) {
704         processor_set_sched_info_t sched_info;
705 
706         if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
707             return KERN_FAILURE;
708 
709         sched_info = (processor_set_sched_info_t) info;
710 
711         pset_lock(pset);
712 #if MACH_FIXPRI
713         sched_info->policies = pset->policies;
714 #else /* MACH_FIXPRI */
715         sched_info->policies = POLICY_TIMESHARE;
716 #endif /* MACH_FIXPRI */
717         sched_info->max_priority = pset->max_priority;
718         pset_unlock(pset);
719 
720         *count = PROCESSOR_SET_SCHED_INFO_COUNT;
721         *host = &realhost;
722         return KERN_SUCCESS;
723     }
724 
725     *host = HOST_NULL;
726     return KERN_INVALID_ARGUMENT;
727 }
728
729 /*
730  * processor_set_max_priority:
731  *
732  * Specify max priority permitted on processor set.  This affects
733  * newly created and assigned threads.  Optionally change existing
734  * ones.
735  */
736 kern_return_t
737 processor_set_max_priority(
738     processor_set_t pset,
739     int max_priority,
740     boolean_t change_threads)
741 {
742     if (pset == PROCESSOR_SET_NULL || invalid_pri(max_priority))
743         return KERN_INVALID_ARGUMENT;
744 
745     pset_lock(pset);
746     pset->max_priority = max_priority;
747 
748     if (change_threads) {
749         queue_head_t *list;
750         thread_t thread;
751 
752         list = &pset->threads;
753         queue_iterate(list, thread, thread_t, pset_threads) {
754             if (thread->max_priority < max_priority)
755                 thread_max_priority(thread, pset, max_priority);
756         }
757     }
758 
759     pset_unlock(pset);
760 
761     return KERN_SUCCESS;
762 }
763
764 /*
765  * processor_set_policy_enable:
766  *
767  * Allow indicated policy on processor set.
768  */
769 
770 kern_return_t
771 processor_set_policy_enable(
772     processor_set_t pset,
773     int policy)
774 {
775     if ((pset == PROCESSOR_SET_NULL) || invalid_policy(policy))
776         return KERN_INVALID_ARGUMENT;
777 
778 #if MACH_FIXPRI
779     pset_lock(pset);
780     pset->policies |= policy;
781     pset_unlock(pset);
782 
783     return KERN_SUCCESS;
784 #else /* MACH_FIXPRI */
785     if (policy == POLICY_TIMESHARE)
786         return KERN_SUCCESS;
787     else
788         return KERN_FAILURE;
789 #endif /* MACH_FIXPRI */
790 }
791
792 /*
793  * processor_set_policy_disable:
794  *
795  * Forbid indicated policy on processor set.  Time sharing cannot
796  * be forbidden.
797  */
798 
799 kern_return_t
800 processor_set_policy_disable(
801     processor_set_t pset,
802     int policy,
803     boolean_t change_threads)
804 {
805     if ((pset == PROCESSOR_SET_NULL) || policy == POLICY_TIMESHARE ||
806         invalid_policy(policy))
807         return KERN_INVALID_ARGUMENT;
808 
809 #if MACH_FIXPRI
810     pset_lock(pset);
811 
812     /*
813      * Check if policy enabled.  Disable if so, then handle
814      * change_threads.
815      */
816     if (pset->policies & policy) {
817         pset->policies &= ~policy;
818 
819         if (change_threads) {
820             queue_head_t *list;
821             thread_t thread;
822 
823             list = &pset->threads;
824             queue_iterate(list, thread, thread_t, pset_threads) {
825                 if (thread->policy == policy)
826                     thread_policy(thread, POLICY_TIMESHARE, 0);
827             }
828         }
829     }
830     pset_unlock(pset);
831 #endif /* MACH_FIXPRI */
832 
833     return KERN_SUCCESS;
834 }
835
836 #define THING_TASK   0
837 #define THING_THREAD 1
838 
839 /*
840  * processor_set_things:
841  *
842  * Common internals for processor_set_{threads,tasks}
843  */
844 kern_return_t
845 processor_set_things(
846     processor_set_t pset,
847     mach_port_t **thing_list,
848     natural_t *count,
849     int type)
850 {
851     unsigned int actual;    /* this many things */
852     int i;
853 
854     vm_size_t size, size_needed;
855     vm_offset_t addr;
856 
857     if (pset == PROCESSOR_SET_NULL)
        → [2] Assuming 'pset' is not equal to null
        → [3] Taking false branch
858         return KERN_INVALID_ARGUMENT;
859 
860     size = 0; addr = 0;
        → [4] The value 0 is assigned to 'addr'
861 
862     for (;;) {
        → [5] Loop condition is true.  Entering loop body
863         pset_lock(pset);
864         if (!pset->active) {
        → [6] Taking false branch
865             pset_unlock(pset);
866             return KERN_FAILURE;
867         }
868 
869         if (type == THING_TASK)
        → [7] Taking false branch
870             actual = pset->task_count;
871         else
872             actual = pset->thread_count;
873 
874         /* do we have the memory we need? */
875 
876         size_needed = actual * sizeof(mach_port_t);
877         if (size_needed <= size)
        → [8] Assuming 'size_needed' is <= 'size'
        → [9] Taking true branch
878             break;
        → [10] Execution continues on line 896
879 
880         /* unlock the pset and allocate more memory */
881         pset_unlock(pset);
882 
883         if (size != 0)
884             kfree(addr, size);
885 
886         assert(size_needed > 0);
887         size = size_needed;
888 
889         addr = kalloc(size);
890         if (addr == 0)
891             return KERN_RESOURCE_SHORTAGE;
892     }
893 
894     /* OK, have memory and the processor_set is locked & active */
895 
896     switch (type) {
        → [11] Control jumps to 'case 1:' at line 912
897     case THING_TASK: {
898         task_t *tasks = (task_t *) addr;
899         task_t task;
900 
901         for (i = 0, task = (task_t) queue_first(&pset->tasks);
902              i < actual;
903              i++, task = (task_t) queue_next(&task->pset_tasks)) {
904             /* take ref for convert_task_to_port */
905             task_reference(task);
906             tasks[i] = task;
907         }
908         assert(queue_end(&pset->tasks, (queue_entry_t) task));
909         break;
910     }
911 
912     case THING_THREAD: {
913         thread_t *threads = (thread_t *) addr;
        → [12] Variable 'threads' initialized to a null pointer value
914         thread_t thread;
915 
916         for (i = 0, thread = (thread_t) queue_first(&pset->threads);
        → [14] Loop condition is true.  Entering loop body
917              i < actual;
        → [13] Assuming 'i' is < 'actual'
918              i++,
919              thread = (thread_t) queue_next(&thread->pset_threads)) {
920             /* take ref for convert_thread_to_port */
921             thread_reference(thread);
922             threads[i] = thread;
        → [15] Array access (from variable 'threads') results in a null pointer dereference
923         }
924         assert(queue_end(&pset->threads, (queue_entry_t) thread));
925         break;
926     }
927     }
928 
929     /* can unlock processor set now that we have the task/thread refs */
930     pset_unlock(pset);
931 
932     if (actual == 0) {
933         /* no things, so return null pointer and deallocate memory */
934         *thing_list = 0;
935         *count = 0;
936 
937         if (size != 0)
938             kfree(addr, size);
939     } else {
940         /* if we allocated too much, must copy */
941 
942         if (size_needed < size) {
943             vm_offset_t newaddr;
944 
945             newaddr = kalloc(size_needed);
946             if (newaddr == 0) {
947                 switch (type) {
948                 case THING_TASK: {
949                     task_t *tasks = (task_t *) addr;
950 
951                     for (i = 0; i < actual; i++)
952                         task_deallocate(tasks[i]);
953                     break;
954                 }
955 
956                 case THING_THREAD: {
957                     thread_t *threads = (thread_t *) addr;
958 
959                     for (i = 0; i < actual; i++)
960                         thread_deallocate(threads[i]);
961                     break;
962                 }
963                 }
964                 kfree(addr, size);
965                 return KERN_RESOURCE_SHORTAGE;
966             }
967 
968             memcpy((void *) newaddr, (void *) addr, size_needed);
969             kfree(addr, size);
970             addr = newaddr;
971         }
972 
973         *thing_list = (mach_port_t *) addr;
974         *count = actual;
975 
976         /* do the conversion that Mig should handle */
977 
978         switch (type) {
979         case THING_TASK: {
980             task_t *tasks = (task_t *) addr;
981 
982             for (i = 0; i < actual; i++)
983                 ((mach_port_t *) tasks)[i] =
984                     (mach_port_t)convert_task_to_port(tasks[i]);
985             break;
986         }
987 
988         case THING_THREAD: {
989             thread_t *threads = (thread_t *) addr;
990 
991             for (i = 0; i < actual; i++)
992                 ((mach_port_t *) threads)[i] =
993                     (mach_port_t)convert_thread_to_port(threads[i]);
994             break;
995         }
996         }
997     }
998 
999     return KERN_SUCCESS;
1000 }
1001
1002
1003 /*
1004  * processor_set_tasks:
1005  *
1006  * List all tasks in the processor set.
1007  */
1008 kern_return_t
1009 processor_set_tasks(
1010     processor_set_t pset,
1011     task_array_t *task_list,
1012     natural_t *count)
1013 {
1014     return processor_set_things(pset, task_list, count, THING_TASK);
1015 }
1016 
1017 /*
1018  * processor_set_threads:
1019  *
1020  * List all threads in the processor set.
1021  */
1022 kern_return_t
1023 processor_set_threads(
1024     processor_set_t pset,
1025     thread_array_t *thread_list,
1026     natural_t *count)
1027 {
1028     return processor_set_things(pset, thread_list, count, THING_THREAD);
        → [1] Calling 'processor_set_things'
1029 }
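
For reference, one way to discharge the analyzer's obligation (an illustrative sketch only; names and placement follow the listing above, and this is not a change known to exist in GNU Mach) is to make the no-buffer case explicit right after the sizing loop, before the switch at line 896, so both copy loops can assume a valid buffer:

    /* addr == 0 here implies size_needed == 0; returning an empty
     * list makes that invariant explicit for both copy loops. */
    if (actual == 0 || addr == 0) {
        pset_unlock(pset);
        *thing_list = 0;
        *count = 0;
        if (size != 0)
            kfree(addr, size);
        return KERN_SUCCESS;
    }

This mirrors the existing actual == 0 branch at line 932 and additionally covers the overflow path the checker is flagging; alternatively, an assert(actual == 0 || addr != 0) before the switch would document the same invariant without changing behavior.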