Bug Summary

File: obj-scan-build/../kern/processor.c
Location: line 920, column 4
Description: Array access (from variable 'threads') results in a null pointer dereference
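
The defect path, read off the numbered analyzer events below: processor_set_things() enters its allocation loop with size == 0 and addr == 0 (event 4), assumes size_needed <= size on the first pass (event 8) and breaks without ever calling kalloc() (event 10), then assumes actual > 0 in the THING_THREAD copy loop (event 13) and stores through threads, which aliases the still-null addr, at line 920 (event 15). On that first pass the two assumptions can only hold together if actual * sizeof(mach_port_t) wraps to zero, so this reads as a defensive-coding finding rather than an easily reachable crash. A minimal, self-contained sketch of the wrap (hypothetical values, not kernel code; assumes a 32-bit vm_size_t and a 4-byte mach_port_t):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Stand-ins for pset->thread_count and sizeof(mach_port_t). */
	uint32_t actual = UINT32_C(1) << 30;	/* an implausibly large thread count */
	uint32_t size_needed = actual * 4;	/* 2^32 wraps to 0 */

	/* size starts at 0, so "size_needed <= size" holds, the loop breaks
	 * with addr still 0, and the copy loop then indexes a null buffer. */
	printf("actual=%u size_needed=%u\n",
	       (unsigned) actual, (unsigned) size_needed);
	return 0;
}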

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993-1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * processor.c: processor and processor_set manipulation routines.
28 */
29
30#include <string.h>
31
32#include <mach/boolean.h>
33#include <mach/policy.h>
34#include <mach/processor_info.h>
35#include <mach/vm_param.h>
36#include <kern/cpu_number.h>
37#include <kern/debug.h>
38#include <kern/kalloc.h>
39#include <kern/lock.h>
40#include <kern/host.h>
41#include <kern/ipc_tt.h>
42#include <kern/processor.h>
43#include <kern/sched.h>
44#include <kern/task.h>
45#include <kern/thread.h>
46#include <kern/ipc_host.h>
47#include <ipc/ipc_port.h>
48
49#if MACH_HOST
50#include <kern/slab.h>
51struct kmem_cache pset_cache;
52#endif /* MACH_HOST */
53
54
55/*
56 * Exported variables.
57 */
58struct processor_set default_pset;
59struct processor processor_array[NCPUS];
60
61queue_head_t all_psets;
62int all_psets_count;
63decl_simple_lock_data(, all_psets_lock)
64
65processor_t master_processor;
66processor_t processor_ptr[NCPUS];
67
68/*
69 * Bootstrap the processor/pset system so the scheduler can run.
70 */
71void pset_sys_bootstrap(void)
72{
73 int i;
74
75 pset_init(&default_pset);
76 default_pset.empty = FALSE;
77 for (i = 0; i < NCPUS; i++) {
78 /*
79 * Initialize processor data structures.
80 * Note that cpu_to_processor(i) is processor_ptr[i].
81 */
82 processor_ptr[i] = &processor_array[i];
83 processor_init(processor_ptr[i], i);
84 }
85 master_processor = cpu_to_processor(master_cpu);
86 queue_init(&all_psets);
87 simple_lock_init(&all_psets_lock);
88 queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
89 all_psets_count = 1;
90 default_pset.active = TRUE;
91 default_pset.empty = FALSE;
92
93 /*
94 * Note: the default_pset has a max_priority of BASEPRI_USER.
95 * Internal kernel threads override this in kernel_thread.
96 */
97}
98
99#if MACH_HOST
100/*
101 * Rest of pset system initializations.
102 */
103void pset_sys_init(void)
104{
105 int i;
106 processor_t processor;
107
108 /*
109 * Allocate the cache for processor sets.
110 */
111 kmem_cache_init(&pset_cache, "processor_set",
112 sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
113
114 /*
115 * Give each processor a control port.
116 * The master processor already has one.
117 */
118 for (i = 0; i < NCPUS; i++) {
119 processor = cpu_to_processor(i);
120 if (processor != master_processor &&
121 machine_slot[i].is_cpu)
122 {
123 ipc_processor_init(processor);
124 }
125 }
126}
127#endif /* MACH_HOST */
128
129/*
130 * Initialize the given processor_set structure.
131 */
132
133void pset_init(
134 processor_set_t pset)
135{
136 int i;
137
138 simple_lock_init(&pset->runq.lock);
139 pset->runq.low = 0;
140 pset->runq.count = 0;
141 for (i = 0; i < NRQS; i++) {
142 queue_init(&(pset->runq.runq[i]));
143 }
144 queue_init(&pset->idle_queue);
145 pset->idle_count = 0;
146 simple_lock_init(&pset->idle_lock);
147 queue_init(&pset->processors);
148 pset->processor_count = 0;
149 pset->empty = TRUE;
150 queue_init(&pset->tasks);
151 pset->task_count = 0;
152 queue_init(&pset->threads);
153 pset->thread_count = 0;
154 pset->ref_count = 1;
155 simple_lock_init(&pset->ref_lock);
156 queue_init(&pset->all_psets);
157 pset->active = FALSE;
158 simple_lock_init(&pset->lock);
159 pset->pset_self = IP_NULL;
160 pset->pset_name_self = IP_NULL;
161 pset->max_priority = BASEPRI_USER;
162#if MACH_FIXPRI
163 pset->policies = POLICY_TIMESHARE;
164#endif /* MACH_FIXPRI */
165 pset->set_quantum = min_quantum;
166#if NCPUS > 1
167 pset->quantum_adj_index = 0;
168 simple_lock_init(&pset->quantum_adj_lock);
169
170 for (i = 0; i <= NCPUS; i++) {
171 pset->machine_quantum[i] = min_quantum;
172 }
173#endif /* NCPUS > 1 */
174 pset->mach_factor = 0;
175 pset->load_average = 0;
176 pset->sched_load = SCHED_SCALE; /* i.e. 1 */
177}
178
179/*
180 * Initialize the given processor structure for the processor in
181 * the slot specified by slot_num.
182 */
183
184void processor_init(
185 processor_t pr,
186 int slot_num)
187{
188 int i;
189
190 simple_lock_init(&pr->runq.lock);
191 pr->runq.low = 0;
192 pr->runq.count = 0;
193 for (i = 0; i < NRQS; i++) {
194 queue_init(&(pr->runq.runq[i]));
195 }
196 queue_init(&pr->processor_queue);
197 pr->state = PROCESSOR_OFF_LINE;
198 pr->next_thread = THREAD_NULL;
199 pr->idle_thread = THREAD_NULL;
200 pr->quantum = 0;
201 pr->first_quantum = FALSE;
202 pr->last_quantum = 0;
203 pr->processor_set = PROCESSOR_SET_NULL;
204 pr->processor_set_next = PROCESSOR_SET_NULL;
205 queue_init(&pr->processors);
206 simple_lock_init(&pr->lock);
207 pr->processor_self = IP_NULL;
208 pr->slot_num = slot_num;
209}
210
211/*
212 * pset_remove_processor() removes a processor from a processor_set.
213 * It can only be called on the current processor. Caller must
214 * hold lock on current processor and processor set.
215 */
216
217void pset_remove_processor(
218 processor_set_t pset,
219 processor_t processor)
220{
221 if (pset != processor->processor_set)
222 panic("pset_remove_processor: wrong pset");
223
224 queue_remove(&pset->processors, processor, processor_t, processors);
225 processor->processor_set = PROCESSOR_SET_NULL;
226 pset->processor_count--;
227 quantum_set(pset);
228}
229
230/*
231 * pset_add_processor() adds a processor to a processor_set.
232 * It can only be called on the current processor. Caller must
233 * hold lock on current processor and on pset. No reference counting on
234 * processors. Processor reference to pset is implicit.
235 */
236
237void pset_add_processor(
238 processor_set_t pset,
239 processor_t processor)
240{
241 queue_enter(&pset->processors, processor, processor_t, processors);
242 processor->processor_set = pset;
243 pset->processor_count++;
244 quantum_set(pset);
245}
246
247/*
248 * pset_remove_task() removes a task from a processor_set.
249 * Caller must hold locks on pset and task. Pset reference count
250 * is not decremented; caller must explicitly pset_deallocate.
251 */
252
253void pset_remove_task(
254 processor_set_t pset,
255 task_t task)
256{
257 if (pset != task->processor_set)
258 return;
259
260 queue_remove(&pset->tasks, task, task_t, pset_tasks);
261 task->processor_set = PROCESSOR_SET_NULL;
262 pset->task_count--;
263}
264
265/*
266 * pset_add_task() adds a task to a processor_set.
267 * Caller must hold locks on pset and task. Pset references to
268 * tasks are implicit.
269 */
270
271void pset_add_task(
272 processor_set_t pset,
273 task_t task)
274{
275 queue_enter(&pset->tasks, task, task_t, pset_tasks);
276 task->processor_set = pset;
277 pset->task_count++;
278}
279
280/*
281 * pset_remove_thread() removes a thread from a processor_set.
282 * Caller must hold locks on pset and thread. Pset reference count
283 * is not decremented; caller must explicitly pset_deallocate.
284 */
285
286void pset_remove_thread(
287 processor_set_t pset,
288 thread_t thread)
289{
290 queue_remove(&pset->threads, thread, thread_t, pset_threads);
291 thread->processor_set = PROCESSOR_SET_NULL;
292 pset->thread_count--;
293}
294
295/*
296 * pset_add_thread() adds a thread to a processor_set.
297 * Caller must hold locks on pset and thread. Pset references to
298 * threads are implicit.
299 */
300
301void pset_add_thread(
302 processor_set_t pset,
303 thread_t thread)
304{
305 queue_enter(&pset->threads, thread, thread_t, pset_threads);
306 thread->processor_set = pset;
307 pset->thread_count++;
308}
309
310/*
311 * thread_change_psets() changes the pset of a thread. Caller must
312 * hold locks on both psets and thread. The old pset must be
313 * explicitly pset_deallocate()'ed by caller.
314 */
315
316void thread_change_psets(
317 thread_t thread,
318 processor_set_t old_pset,
319 processor_set_t new_pset)
320{
321 queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
322 old_pset->thread_count--;
323 queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
324 thread->processor_set = new_pset;
325 new_pset->thread_count++;
326}
327
328/*
329 * pset_deallocate:
330 *
331 * Remove one reference to the processor set. Destroy processor_set
332 * if this was the last reference.
333 */
334void pset_deallocate(
335 processor_set_t pset)
336{
337 if (pset == PROCESSOR_SET_NULL)
338 return;
339
340 pset_ref_lock(pset);
341 if (--pset->ref_count > 0) {
342 pset_ref_unlock(pset);
343 return;
344 }
345#if !MACH_HOST
346 panic("pset_deallocate: default_pset destroyed");
347#endif /* !MACH_HOST */
348
349#if MACH_HOST
350 /*
351 * Reference count is zero, however the all_psets list
352 * holds an implicit reference and may make new ones.
353 * Its lock also dominates the pset lock. To check for this,
354 * temporarily restore one reference, and then lock the
355 * other structures in the right order.
356 */
357 pset->ref_count = 1;
358 pset_ref_unlock(pset);
359
360 simple_lock(&all_psets_lock);
361 pset_ref_lock(pset);
362 if (--pset->ref_count > 0) {
363 /*
364 * Made an extra reference.
365 */
366 pset_ref_unlock(pset);
367 simple_unlock(&all_psets_lock);
368 return;
369 }
370
371 /*
372 * Ok to destroy pset. Make a few paranoia checks.
373 */
374
375 if ((pset == &default_pset) || (pset->thread_count > 0) ||
376 (pset->task_count > 0) || pset->processor_count > 0) {
377 panic("pset_deallocate: destroy default or active pset");
378 }
379 /*
380 * Remove from all_psets queue.
381 */
382 queue_remove(&all_psets, pset, processor_set_t, all_psets);
383 all_psets_count--;
384
385 pset_ref_unlock(pset);
386 simple_unlock(&all_psets_lock);
387
388 /*
389 * That's it, free data structure.
390 */
391 kmem_cache_free(&pset_cache, (vm_offset_t)pset);
392#endif /* MACH_HOST */
393}
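
A note on the resurrect-and-relock protocol above (lines 349-386): because all_psets_lock must be taken before the pset's own locks, a release that drops the count to zero cannot free in place; it restores one reference, reacquires the locks in the legal order, and re-checks. A compact user-space sketch of the same pattern, with pthreads and malloc as stand-ins (all names here are hypothetical):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	pthread_mutex_t ref_lock;
	int ref_count;			/* the registry holds an implicit reference */
};

void obj_release(struct obj *o)
{
	pthread_mutex_lock(&o->ref_lock);
	if (--o->ref_count > 0) {
		pthread_mutex_unlock(&o->ref_lock);
		return;
	}

	/* Hit zero: resurrect one reference, then take the locks in the
	 * legal order (registry before object) and re-check. */
	o->ref_count = 1;
	pthread_mutex_unlock(&o->ref_lock);

	pthread_mutex_lock(&registry_lock);
	pthread_mutex_lock(&o->ref_lock);
	if (--o->ref_count > 0) {
		/* Someone re-referenced it via the registry meanwhile. */
		pthread_mutex_unlock(&o->ref_lock);
		pthread_mutex_unlock(&registry_lock);
		return;
	}

	/* Still zero with the registry locked: safe to unlist and free. */
	pthread_mutex_unlock(&o->ref_lock);
	pthread_mutex_unlock(&registry_lock);
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof *o);
	if (o == NULL)
		return 1;
	pthread_mutex_init(&o->ref_lock, NULL);
	o->ref_count = 1;		/* the registry's implicit reference */
	obj_release(o);			/* drops to zero and frees */
	return 0;
}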
394
395/*
396 * pset_reference:
397 *
398 * Add one reference to the processor set.
399 */
400void pset_reference(
401 processor_set_t pset)
402{
403 pset_ref_lock(pset);
404 pset->ref_count++;
405 pset_ref_unlock(pset);
406}
407
408kern_return_t
409processor_info(
410 processor_t processor,
411 int flavor,
412 host_t *host,
413 processor_info_t info,
414 natural_t *count)
415{
416 int slot_num, state;
417 processor_basic_info_t basic_info;
418
419 if (processor == PROCESSOR_NULL)
420 return KERN_INVALID_ARGUMENT;
421
422 if (flavor != PROCESSOR_BASIC_INFO ||
423 *count < PROCESSOR_BASIC_INFO_COUNT)
424 return KERN_FAILURE;
425
426 basic_info = (processor_basic_info_t) info;
427
428 slot_num = processor->slot_num;
429 basic_info->cpu_type = machine_slot[slot_num].cpu_type;
430 basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
431 state = processor->state;
432 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
433 basic_info->running = FALSE;
434 else
435 basic_info->running = TRUE;
436 basic_info->slot_num = slot_num;
437 if (processor == master_processor)
438 basic_info->is_master = TRUE;
439 else
440 basic_info->is_master = FALSE;
441
442 *count = PROCESSOR_BASIC_INFO_COUNT;
443 *host = &realhost;
444 return KERN_SUCCESS;
445}
446
447kern_return_t processor_start(
448 processor_t processor)
449{
450 if (processor == PROCESSOR_NULL)
451 return KERN_INVALID_ARGUMENT;
452#if NCPUS > 1
453 return cpu_start(processor->slot_num);
454#else /* NCPUS > 1 */
455 return KERN_FAILURE;
456#endif /* NCPUS > 1 */
457}
458
459kern_return_t processor_exit(
460 processor_t processor)
461{
462 if (processor == PROCESSOR_NULL)
463 return KERN_INVALID_ARGUMENT;
464
465#if NCPUS > 1
466 return processor_shutdown(processor);
467#else /* NCPUS > 1 */
468 return KERN_FAILURE;
469#endif /* NCPUS > 1 */
470}
471
472kern_return_t
473processor_control(
474 processor_t processor,
475 processor_info_t info,
476 natural_t count)
477{
478 if (processor == PROCESSOR_NULL)
479 return KERN_INVALID_ARGUMENT;
480
481#if NCPUS > 1
482 return cpu_control(processor->slot_num, (int *)info, count);
483#else /* NCPUS > 1 */
484 return KERN_FAILURE;
485#endif /* NCPUS > 1 */
486}
487
488/*
489 * Precalculate the appropriate system quanta based on load. The
490 * index into machine_quantum is the number of threads on the
491 * processor set queue. It is limited to the number of processors in
492 * the set.
493 */
494
495void quantum_set(
496 processor_set_t pset)
497{
498#if NCPUS > 1
499 int i, ncpus;
500
501 ncpus = pset->processor_count;
502
503 for ( i=1 ; i <= ncpus ; i++) {
504 pset->machine_quantum[i] =
505 ((min_quantum * ncpus) + (i/2)) / i ;
506 }
507 pset->machine_quantum[0] = 2 * pset->machine_quantum[1];
508
509 i = ((pset->runq.count > pset->processor_count) ?
510 pset->processor_count : pset->runq.count);
511 pset->set_quantum = pset->machine_quantum[i];
512#else /* NCPUS > 1 */
513 default_pset.set_quantum = min_quantum;
514#endif /* NCPUS > 1 */
515}
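
A note on the rounding in quantum_set() above: the expression ((min_quantum * ncpus) + (i/2)) / i divides the set's total quantum budget by the queue depth with round-to-nearest rather than truncation. A small self-contained check (min_quantum = 10 is an arbitrary stand-in for the kernel's tick-derived value):

#include <stdio.h>

int main(void)
{
	int min_quantum = 10;	/* hypothetical; the kernel derives this from the clock rate */
	int ncpus = 3;
	int i;

	for (i = 1; i <= ncpus; i++)
		printf("machine_quantum[%d] = %d\n", i,
		       ((min_quantum * ncpus) + (i / 2)) / i);
	/* prints 30, 15, 10: the 30-tick budget split 1, 2, and 3 ways */
	return 0;
}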
516
517#if MACH_HOST
518/*
519 * processor_set_create:
520 *
521 * Create and return a new processor set.
522 */
523
524kern_return_t
525processor_set_create(
526 host_t host,
527 processor_set_t *new_set,
528 processor_set_t *new_name)
529{
530 processor_set_t pset;
531
532 if (host == HOST_NULL)
533 return KERN_INVALID_ARGUMENT;
534
535 pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
536 pset_init(pset);
537 pset_reference(pset); /* for new_set out argument */
538 pset_reference(pset); /* for new_name out argument */
539 ipc_pset_init(pset);
540 pset->active = TRUE;
541
542 simple_lock(&all_psets_lock);
543 queue_enter(&all_psets, pset, processor_set_t, all_psets);
544 all_psets_count++;
545 simple_unlock(&all_psets_lock);
546
547 ipc_pset_enable(pset);
548
549 *new_set = pset;
550 *new_name = pset;
551 return KERN_SUCCESS;
552}
553
554/*
555 * processor_set_destroy:
556 *
557 * destroy a processor set. Any tasks, threads or processors
558 * currently assigned to it are reassigned to the default pset.
559 */
560kern_return_t processor_set_destroy(
561 processor_set_t pset)
562{
563 queue_entry_t elem;
564 queue_head_t *list;
565
566 if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
567 return KERN_INVALID_ARGUMENT;
568
569 /*
570 * Handle multiple termination race. First one through sets
571 * active to FALSE and disables ipc access.
572 */
573 pset_lock(pset);
574 if (!(pset->active)) {
575 pset_unlock(pset);
576 return KERN_FAILURE;
577 }
578
579 pset->active = FALSE;
580 ipc_pset_disable(pset);
581
582
583 /*
584 * Now reassign everything in this set to the default set.
585 */
586
587 if (pset->task_count > 0) {
588 list = &pset->tasks;
589 while (!queue_empty(list)) {
590 elem = queue_first(list);
591 task_reference((task_t) elem);
592 pset_unlock(pset);
593 task_assign((task_t) elem, &default_pset, FALSE);
594 task_deallocate((task_t) elem);
595 pset_lock(pset);
596 }
597 }
598
599 if (pset->thread_count > 0) {
600 list = &pset->threads;
601 while (!queue_empty(list)) {
602 elem = queue_first(list);
603 thread_reference((thread_t) elem);
604 pset_unlock(pset);
605 thread_assign((thread_t) elem, &default_pset);
606 thread_deallocate((thread_t) elem);
607 pset_lock(pset);
608 }
609 }
610
611 if (pset->processor_count > 0) {
612 list = &pset->processors;
613 while(!queue_empty(list)) {
614 elem = queue_first(list);
615 pset_unlock(pset);
616 processor_assign((processor_t) elem, &default_pset, TRUE);
617 pset_lock(pset);
618 }
619 }
620
621 pset_unlock(pset);
622
623 /*
624 * Destroy ipc state.
625 */
626 ipc_pset_terminate(pset);
627
628 /*
629 * Deallocate pset's reference to itself.
630 */
631 pset_deallocate(pset);
632 return KERN_SUCCESS;
633}
634
635#else /* MACH_HOST */
636
637kern_return_t
638processor_set_create(
639 host_t host,
640 processor_set_t *new_set,
641 processor_set_t *new_name)
642{
643 return KERN_FAILURE;
644}
645
646kern_return_t processor_set_destroy(
647 processor_set_t pset)
648{
649 return KERN_FAILURE;
650}
651
652#endif /* MACH_HOST */
653
654kern_return_t
655processor_get_assignment(
656 processor_t processor,
657 processor_set_t *pset)
658{
659 int state;
660
661 state = processor->state;
662 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
663 return KERN_FAILURE;
664
665 *pset = processor->processor_set;
666 pset_reference(*pset);
667 return KERN_SUCCESS;
668}
669
670kern_return_t
671processor_set_info(
672 processor_set_t pset,
673 int flavor,
674 host_t *host,
675 processor_set_info_t info,
676 natural_t *count)
677{
678 if (pset == PROCESSOR_SET_NULL)
679 return KERN_INVALID_ARGUMENT;
680
681 if (flavor == PROCESSOR_SET_BASIC_INFO) {
682 processor_set_basic_info_t basic_info;
683
684 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
685 return KERN_FAILURE;
686
687 basic_info = (processor_set_basic_info_t) info;
688
689 pset_lock(pset);
690 basic_info->processor_count = pset->processor_count;
691 basic_info->task_count = pset->task_count;
692 basic_info->thread_count = pset->thread_count;
693 basic_info->mach_factor = pset->mach_factor;
694 basic_info->load_average = pset->load_average;
695 pset_unlock(pset);
696
697 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
698 *host = &realhost;
699 return KERN_SUCCESS;
700 }
701 else if (flavor == PROCESSOR_SET_SCHED_INFO) {
702 processor_set_sched_info_t sched_info;
703
704 if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
705 return KERN_FAILURE;
706
707 sched_info = (processor_set_sched_info_t) info;
708
709 pset_lock(pset);
710#if MACH_FIXPRI
711 sched_info->policies = pset->policies;
712#else /* MACH_FIXPRI */
713 sched_info->policies = POLICY_TIMESHARE;
714#endif /* MACH_FIXPRI */
715 sched_info->max_priority = pset->max_priority;
716 pset_unlock(pset);
717
718 *count = PROCESSOR_SET_SCHED_INFO_COUNT;
719 *host = &realhost;
720 return KERN_SUCCESS;
721 }
722
723 *host = HOST_NULL;
724 return KERN_INVALID_ARGUMENT;
725}
726
727/*
728 * processor_set_max_priority:
729 *
730 * Specify max priority permitted on processor set. This affects
731 * newly created and assigned threads. Optionally change existing
732 * ones.
733 */
734kern_return_t
735processor_set_max_priority(
736 processor_set_t pset,
737 int max_priority,
738 boolean_t change_threads)
739{
740 if (pset == PROCESSOR_SET_NULL || invalid_pri(max_priority))
741 return KERN_INVALID_ARGUMENT;
742
743 pset_lock(pset);
744 pset->max_priority = max_priority;
745
746 if (change_threads) {
747 queue_head_t *list;
748 thread_t thread;
749
750 list = &pset->threads;
751 queue_iterate(list, thread, thread_t, pset_threads) {
752 if (thread->max_priority < max_priority)
753 thread_max_priority(thread, pset, max_priority);
754 }
755 }
756
757 pset_unlock(pset);
758
759 return KERN_SUCCESS;
760}
761
762/*
763 * processor_set_policy_enable:
764 *
765 * Allow indicated policy on processor set.
766 */
767
768kern_return_t
769processor_set_policy_enable(
770 processor_set_t pset,
771 int policy)
772{
773 if ((pset == PROCESSOR_SET_NULL) || invalid_policy(policy))
774 return KERN_INVALID_ARGUMENT;
775
776#if MACH_FIXPRI
777 pset_lock(pset);
778 pset->policies |= policy;
779 pset_unlock(pset);
780
781 return KERN_SUCCESS;
782#else /* MACH_FIXPRI */
783 if (policy == POLICY_TIMESHARE)
784 return KERN_SUCCESS;
785 else
786 return KERN_FAILURE;
787#endif /* MACH_FIXPRI */
788}
789
790/*
791 * processor_set_policy_disable:
792 *
793 * Forbid indicated policy on processor set. Time sharing cannot
794 * be forbidden.
795 */
796
797kern_return_t
798processor_set_policy_disable(
799 processor_set_t pset,
800 int policy,
801 boolean_t change_threads)
802{
803 if ((pset == PROCESSOR_SET_NULL) || policy == POLICY_TIMESHARE ||
804 invalid_policy(policy))
805 return KERN_INVALID_ARGUMENT;
806
807#if MACH_FIXPRI
808 pset_lock(pset);
809
810 /*
811 * Check if policy enabled. Disable if so, then handle
812 * change_threads.
813 */
814 if (pset->policies & policy) {
815 pset->policies &= ~policy;
816
817 if (change_threads) {
818 queue_head_t *list;
819 thread_t thread;
820
821 list = &pset->threads;
822 queue_iterate(list, thread, thread_t, pset_threads) {
823 if (thread->policy == policy)
824 thread_policy(thread, POLICY_TIMESHARE, 0);
825 }
826 }
827 }
828 pset_unlock(pset);
829#endif /* MACH_FIXPRI */
830
831 return KERN_SUCCESS;
832}
833
834#define THING_TASK 0
835#define THING_THREAD 1
836
837/*
838 * processor_set_things:
839 *
840 * Common internals for processor_set_{threads,tasks}
841 */
842kern_return_t
843processor_set_things(
844 processor_set_t pset,
845 mach_port_t **thing_list,
846 natural_t *count,
847 int type)
848{
849 unsigned int actual; /* this many things */
850 int i;
851
852 vm_size_t size, size_needed;
853 vm_offset_t addr;
854
855 if (pset == PROCESSOR_SET_NULL)
  [2] Assuming 'pset' is not equal to null
  [3] Taking false branch
856 return KERN_INVALID_ARGUMENT;
857
858 size = 0; addr = 0;
  [4] The value 0 is assigned to 'addr'
859
860 for (;;) {
  [5] Loop condition is true. Entering loop body
861 pset_lock(pset);
862 if (!pset->active) {
  [6] Taking false branch
863 pset_unlock(pset);
864 return KERN_FAILURE;
865 }
866
867 if (type == THING_TASK)
  [7] Taking false branch
868 actual = pset->task_count;
869 else
870 actual = pset->thread_count;
871
872 /* do we have the memory we need? */
873
874 size_needed = actual * sizeof(mach_port_t);
875 if (size_needed <= size)
  [8] Assuming 'size_needed' is <= 'size'
  [9] Taking true branch
876 break;
  [10] Execution continues on line 894
877
878 /* unlock the pset and allocate more memory */
879 pset_unlock(pset);
880
881 if (size != 0)
882 kfree(addr, size);
883
884 assert(size_needed > 0);
885 size = size_needed;
886
887 addr = kalloc(size);
888 if (addr == 0)
889 return KERN_RESOURCE_SHORTAGE;
890 }
891
892 /* OK, have memory and the processor_set is locked & active */
893
894 switch (type) {
  [11] Control jumps to 'case 1:' at line 910
895 case THING_TASK: {
896 task_t *tasks = (task_t *) addr;
897 task_t task;
898
899 for (i = 0, task = (task_t) queue_first(&pset->tasks);
900 i < actual;
901 i++, task = (task_t) queue_next(&task->pset_tasks)) {
902 /* take ref for convert_task_to_port */
903 task_reference(task);
904 tasks[i] = task;
905 }
906 assert(queue_end(&pset->tasks, (queue_entry_t) task));
907 break;
908 }
909
910 case THING_THREAD: {
911 thread_t *threads = (thread_t *) addr;
  [12] Variable 'threads' initialized to a null pointer value
912 thread_t thread;
913
914 for (i = 0, thread = (thread_t) queue_first(&pset->threads);
  [14] Loop condition is true. Entering loop body
915 i < actual;
  [13] Assuming 'i' is < 'actual'
916 i++,
917 thread = (thread_t) queue_next(&thread->pset_threads)) {
918 /* take ref for convert_thread_to_port */
919 thread_reference(thread);
920 threads[i] = thread;
  [15] Array access (from variable 'threads') results in a null pointer dereference
921 }
922 assert(queue_end(&pset->threads, (queue_entry_t) thread));
923 break;
924 }
925 }
926
927 /* can unlock processor set now that we have the task/thread refs */
928 pset_unlock(pset);
929
930 if (actual == 0) {
931 /* no things, so return null pointer and deallocate memory */
932 *thing_list = 0;
933 *count = 0;
934
935 if (size != 0)
936 kfree(addr, size);
937 } else {
938 /* if we allocated too much, must copy */
939
940 if (size_needed < size) {
941 vm_offset_t newaddr;
942
943 newaddr = kalloc(size_needed);
944 if (newaddr == 0) {
945 switch (type) {
946 case THING_TASK: {
947 task_t *tasks = (task_t *) addr;
948
949 for (i = 0; i < actual; i++)
950 task_deallocate(tasks[i]);
951 break;
952 }
953
954 case THING_THREAD: {
955 thread_t *threads = (thread_t *) addr;
956
957 for (i = 0; i < actual; i++)
958 thread_deallocate(threads[i]);
959 break;
960 }
961 }
962 kfree(addr, size);
963 return KERN_RESOURCE_SHORTAGE;
964 }
965
966 memcpy((void *) newaddr, (void *) addr, size_needed);
967 kfree(addr, size);
968 addr = newaddr;
969 }
970
971 *thing_list = (mach_port_t *) addr;
972 *count = actual;
973
974 /* do the conversion that Mig should handle */
975
976 switch (type) {
977 case THING_TASK: {
978 task_t *tasks = (task_t *) addr;
979
980 for (i = 0; i < actual; i++)
981 ((mach_port_t *) tasks)[i] =
982 (mach_port_t)convert_task_to_port(tasks[i]);
983 break;
984 }
985
986 case THING_THREAD: {
987 thread_t *threads = (thread_t *) addr;
988
989 for (i = 0; i < actual; i++)
990 ((mach_port_t *) threads)[i] =
991 (mach_port_t)convert_thread_to_port(threads[i]);
992 break;
993 }
994 }
995 }
996
997 return KERN_SUCCESS;
998}
999
1000
1001/*
1002 * processor_set_tasks:
1003 *
1004 * List all tasks in the processor set.
1005 */
1006kern_return_t
1007processor_set_tasks(
1008 processor_set_t pset,
1009 task_array_t *task_list,
1010 natural_t *count)
1011{
1012 return processor_set_things(pset, task_list, count, THING_TASK);
1013}
1014
1015/*
1016 * processor_set_threads:
1017 *
1018 * List all threads in the processor set.
1019 */
1020kern_return_t
1021processor_set_threads(
1022 processor_set_t pset,
1023 thread_array_t *thread_list,
1024 natural_t *count)
1025{
1026 return processor_set_things(pset, thread_list, count, THING_THREAD);
  [1] Calling 'processor_set_things'
1027}
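
Suggested hardening (a sketch of the shape of a fix, not an upstream patch): make the empty case explicit before the copy loops, so the buffer can never be indexed while null even if the size arithmetic wraps. The self-contained model below reproduces the allocate-retry structure of processor_set_things() with libc stand-ins for kalloc/kfree and a plain counter in place of the pset (all names hypothetical) and adds that guard:

#include <stdio.h>
#include <stdlib.h>

static size_t fake_thread_count = 3;	/* stands in for pset->thread_count */

static int collect(unsigned **out, size_t *out_count)
{
	size_t size = 0, size_needed;
	unsigned *buf = NULL;
	size_t i, actual;

	for (;;) {
		/* The lock would be taken here; re-read the current count. */
		actual = fake_thread_count;
		size_needed = actual * sizeof(unsigned);
		if (size_needed <= size)
			break;

		/* Drop the lock, grow the buffer, retry. */
		free(buf);
		size = size_needed;
		buf = malloc(size);
		if (buf == NULL)
			return -1;
	}

	/* The guard the analyzer asks for: never index a null buffer,
	 * even if the size computation above wrapped to zero. */
	if (actual != 0 && buf == NULL)
		return -1;

	for (i = 0; i < actual; i++)
		buf[i] = (unsigned) i;	/* the analogue of threads[i] = thread */

	*out = buf;
	*out_count = actual;
	return 0;
}

int main(void)
{
	unsigned *list;
	size_t n;

	if (collect(&list, &n) == 0) {
		printf("collected %zu entries\n", n);
		free(list);
	}
	return 0;
}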