Bug Summary

File: obj-scan-build/../kern/processor.c
Location: line 927, column 4
Description: Array access (from variable 'threads') results in a null pointer dereference
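
How to read the path below: 'addr' is set to 0 at step 4, and the sizing loop exits without allocating only when size_needed <= size, i.e. size_needed == 0 on the first pass (steps 8-10). Step 13 then assumes 'actual' is nonzero, so both assumptions can hold together only if actual * sizeof(mach_port_t) wraps around to 0 (or if the analyzer simply does not correlate the two values). Since 'actual' comes from a kernel-maintained count, the path is unlikely in practice, but the report effectively flags the unchecked multiplication at line 881. A minimal sketch of a guard inside the sizing loop, reusing names from the listing (illustrative only, not the upstream fix):

    /* Hypothetical overflow guard before computing size_needed. */
    if (actual > (vm_size_t) -1 / sizeof(mach_port_t)) {
            pset_unlock(pset);
            if (size != 0)
                    kfree(addr, size);      /* drop any buffer from an earlier pass */
            return KERN_RESOURCE_SHORTAGE;
    }
    size_needed = actual * sizeof(mach_port_t);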

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993-1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * processor.c: processor and processor_set manipulation routines.
28 */
29
30#include <string.h>
31
32#include <mach/boolean.h>
33#include <mach/policy.h>
34#include <mach/processor_info.h>
35#include <mach/vm_param.h>
36#include <kern/cpu_number.h>
37#include <kern/debug.h>
38#include <kern/kalloc.h>
39#include <kern/lock.h>
40#include <kern/host.h>
41#include <kern/ipc_tt.h>
42#include <kern/processor.h>
43#include <kern/sched.h>
44#include <kern/task.h>
45#include <kern/thread.h>
46#include <kern/ipc_host.h>
47#include <ipc/ipc_port.h>
48
49#if MACH_HOST
50#include <kern/slab.h>
51struct kmem_cache pset_cache;
52#endif /* MACH_HOST */
53
54
55/*
56 * Exported variables.
57 */
58struct processor_set default_pset;
59struct processor processor_array[NCPUS];
60
61queue_head_t all_psets;
62int all_psets_count;
63decl_simple_lock_data(, all_psets_lock);
64
65processor_t master_processor;
66processor_t processor_ptr[NCPUS];
67
68/*
69 * Forward declarations.
70 */
71void quantum_set(processor_set_t);
72void pset_init(processor_set_t);
73void processor_init(processor_t, int);
74
75/*
76 * Bootstrap the processor/pset system so the scheduler can run.
77 */
78void pset_sys_bootstrap(void)
79{
80 int i;
81
82 pset_init(&default_pset);
83 default_pset.empty = FALSE;
84 for (i = 0; i < NCPUS; i++) {
85 /*
86 * Initialize processor data structures.
87 * Note that cpu_to_processor(i) is processor_ptr[i].
88 */
89 processor_ptr[i] = &processor_array[i];
90 processor_init(processor_ptr[i], i);
91 }
92 master_processor = cpu_to_processor(master_cpu);
93 queue_init(&all_psets);
94 simple_lock_init(&all_psets_lock);
95 queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
96 all_psets_count = 1;
97 default_pset.active = TRUE;
98 default_pset.empty = FALSE;
99
100 /*
101 * Note: the default_pset has a max_priority of BASEPRI_USER.
102 * Internal kernel threads override this in kernel_thread.
103 */
104}
105
106#if MACH_HOST
107/*
108 * Rest of pset system initializations.
109 */
110void pset_sys_init(void)
111{
112 int i;
113 processor_t processor;
114
115 /*
116 * Allocate the cache for processor sets.
117 */
118 kmem_cache_init(&pset_cache, "processor_set",
119 sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
120
121 /*
122 * Give each processor a control port.
123 * The master processor already has one.
124 */
125 for (i = 0; i < NCPUS; i++) {
126 processor = cpu_to_processor(i);
127 if (processor != master_processor &&
128 machine_slot[i].is_cpu)
129 {
130 ipc_processor_init(processor);
131 }
132 }
133}
134#endif /* MACH_HOST */
135
136/*
137 * Initialize the given processor_set structure.
138 */
139
140void pset_init(
141 processor_set_t pset)
142{
143 int i;
144
145 simple_lock_init(&pset->runq.lock);
146 pset->runq.low = 0;
147 pset->runq.count = 0;
148 for (i = 0; i < NRQS; i++) {
149 queue_init(&(pset->runq.runq[i]));
150 }
151 queue_init(&pset->idle_queue);
152 pset->idle_count = 0;
153 simple_lock_init(&pset->idle_lock);
154 queue_init(&pset->processors);
155 pset->processor_count = 0;
156 pset->empty = TRUE;
157 queue_init(&pset->tasks);
158 pset->task_count = 0;
159 queue_init(&pset->threads);
160 pset->thread_count = 0;
161 pset->ref_count = 1;
162 simple_lock_init(&pset->ref_lock);
163 queue_init(&pset->all_psets);
164 pset->active = FALSE;
165 simple_lock_init(&pset->lock);
166 pset->pset_self = IP_NULL;
167 pset->pset_name_self = IP_NULL;
168 pset->max_priority = BASEPRI_USER;
169#if MACH_FIXPRI
170 pset->policies = POLICY_TIMESHARE;
171#endif /* MACH_FIXPRI */
172 pset->set_quantum = min_quantum;
173#if NCPUS > 1
174 pset->quantum_adj_index = 0;
175 simple_lock_init(&pset->quantum_adj_lock);
176
177 for (i = 0; i <= NCPUS; i++) {
178 pset->machine_quantum[i] = min_quantum;
179 }
180#endif /* NCPUS > 1 */
181 pset->mach_factor = 0;
182 pset->load_average = 0;
183 pset->sched_load = SCHED_SCALE; /* i.e. 1 */
184}
185
186/*
187 * Initialize the given processor structure for the processor in
188 * the slot specified by slot_num.
189 */
190
191void processor_init(
192 processor_t pr,
193 int slot_num)
194{
195 int i;
196
197 simple_lock_init(&pr->runq.lock);
198 pr->runq.low = 0;
199 pr->runq.count = 0;
200 for (i = 0; i < NRQS; i++) {
201 queue_init(&(pr->runq.runq[i]));
202 }
203 queue_init(&pr->processor_queue);
204 pr->state = PROCESSOR_OFF_LINE;
205 pr->next_thread = THREAD_NULL;
206 pr->idle_thread = THREAD_NULL;
207 pr->quantum = 0;
208 pr->first_quantum = FALSE;
209 pr->last_quantum = 0;
210 pr->processor_set = PROCESSOR_SET_NULL;
211 pr->processor_set_next = PROCESSOR_SET_NULL;
212 queue_init(&pr->processors);
213 simple_lock_init(&pr->lock);
214 pr->processor_self = IP_NULL;
215 pr->slot_num = slot_num;
216}
217
218/*
219 * pset_remove_processor() removes a processor from a processor_set.
220 * It can only be called on the current processor. Caller must
221 * hold lock on current processor and processor set.
222 */
223
224void pset_remove_processor(
225 processor_set_t pset,
226 processor_t processor)
227{
228 if (pset != processor->processor_set)
229 panic("pset_remove_processor: wrong pset");
230
231 queue_remove(&pset->processors, processor, processor_t, processors);
232 processor->processor_set = PROCESSOR_SET_NULL;
233 pset->processor_count--;
234 quantum_set(pset);
235}
236
237/*
238 * pset_add_processor() adds a processor to a processor_set.
239 * It can only be called on the current processor. Caller must
240 * hold lock on current processor and on pset. No reference counting on
241 * processors. Processor reference to pset is implicit.
242 */
243
244void pset_add_processor(
245 processor_set_t pset,
246 processor_t processor)
247{
248 queue_enter(&pset->processors, processor, processor_t, processors);
249 processor->processor_set = pset;
250 pset->processor_count++;
251 quantum_set(pset);
252}
253
254/*
255 * pset_remove_task() removes a task from a processor_set.
256 * Caller must hold locks on pset and task. Pset reference count
257 * is not decremented; caller must explicitly pset_deallocate.
258 */
259
260void pset_remove_task(
261 processor_set_t pset,
262 task_t task)
263{
264 if (pset != task->processor_set)
265 return;
266
267 queue_remove(&pset->tasks, task, task_t, pset_tasks);
268 task->processor_set = PROCESSOR_SET_NULL;
269 pset->task_count--;
270}
271
272/*
273 * pset_add_task() adds a task to a processor_set.
274 * Caller must hold locks on pset and task. Pset references to
275 * tasks are implicit.
276 */
277
278void pset_add_task(
279 processor_set_t pset,
280 task_t task)
281{
282 queue_enter(&pset->tasks, task, task_t, pset_tasks);
283 task->processor_set = pset;
284 pset->task_count++;
285}
286
287/*
288 * pset_remove_thread() removes a thread from a processor_set.
289 * Caller must hold locks on pset and thread. Pset reference count
290 * is not decremented; caller must explicitly pset_deallocate.
291 */
292
293void pset_remove_thread(
294 processor_set_t pset,
295 thread_t thread)
296{
297 queue_remove(&pset->threads, thread, thread_t, pset_threads);
298 thread->processor_set = PROCESSOR_SET_NULL;
299 pset->thread_count--;
300}
301
302/*
303 * pset_add_thread() adds a thread to a processor_set.
304 * Caller must hold locks on pset and thread. Pset references to
305 * threads are implicit.
306 */
307
308void pset_add_thread(
309 processor_set_t pset,
310 thread_t thread)
311{
312 queue_enter(&pset->threads, thread, thread_t, pset_threads);
313 thread->processor_set = pset;
314 pset->thread_count++;
315}
316
317/*
318 * thread_change_psets() changes the pset of a thread. Caller must
319 * hold locks on both psets and thread. The old pset must be
320 * explicitly pset_deallocate()'d by caller.
321 */
322
323void thread_change_psets(
324 thread_t thread,
325 processor_set_t old_pset,
326 processor_set_t new_pset)
327{
328 queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
329 old_pset->thread_count--;
330 queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
331 thread->processor_set = new_pset;
332 new_pset->thread_count++;
333}
334
335/*
336 * pset_deallocate:
337 *
338 * Remove one reference to the processor set. Destroy processor_set
339 * if this was the last reference.
340 */
341void pset_deallocate(
342 processor_set_t pset)
343{
344 if (pset == PROCESSOR_SET_NULL)
345 return;
346
347 pset_ref_lock(pset);
348 if (--pset->ref_count > 0) {
349 pset_ref_unlock(pset);
350 return;
351 }
352#if !MACH_HOST
353 panic("pset_deallocate: default_pset destroyed");
354#endif /* !MACH_HOST */
355
356#if MACH_HOST
357 /*
358 * Reference count is zero, however the all_psets list
359 * holds an implicit reference and may make new ones.
360 * Its lock also dominates the pset lock. To check for this,
361 * temporarily restore one reference, and then lock the
362 * other structures in the right order.
363 */
364 pset->ref_count = 1;
365 pset_ref_unlock(pset);
366
367 simple_lock(&all_psets_lock);
368 pset_ref_lock(pset);
369 if (--pset->ref_count > 0) {
370 /*
371 * Made an extra reference.
372 */
373 pset_ref_unlock(pset);
374 simple_unlock(&all_psets_lock);
375 return;
376 }
377
378 /*
379 * Ok to destroy pset. Make a few paranoia checks.
380 */
381
382 if ((pset == &default_pset) || (pset->thread_count > 0) ||
383 (pset->task_count > 0) || pset->processor_count > 0) {
384 panic("pset_deallocate: destroy default or active pset");
385 }
386 /*
387 * Remove from all_psets queue.
388 */
389 queue_remove(&all_psets, pset, processor_set_t, all_psets);
390 all_psets_count--;
391
392 pset_ref_unlock(pset);
393 simple_unlock(&all_psets_lock);
394
395 /*
396 * That's it, free data structure.
397 */
398 kmem_cache_free(&pset_cache, (vm_offset_t)pset);
399#endif /* MACH_HOST */
400}
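
The comment at lines 357-363 is worth dwelling on: all_psets_lock dominates (must be taken before) the pset reference lock, yet the zero crossing is discovered while holding only the reference lock. The code therefore resurrects the reference, drops its lock, takes the locks in the legal order, and re-checks the count. A condensed sketch of that pattern, with a generic object and hypothetical lock/unlock helpers:

    /* count hit zero under obj->ref_lock, but list_lock dominates it */
    obj->ref_count = 1;              /* temporary resurrection */
    unlock(&obj->ref_lock);
    lock(&list_lock);                /* dominating lock first */
    lock(&obj->ref_lock);
    if (--obj->ref_count > 0) {      /* someone took a new reference */
            unlock(&obj->ref_lock);
            unlock(&list_lock);
            return;
    }
    /* still zero: safe to unlink from the list and free */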
401
402/*
403 * pset_reference:
404 *
405 * Add one reference to the processor set.
406 */
407void pset_reference(
408 processor_set_t pset)
409{
410 pset_ref_lock(pset);
411 pset->ref_count++;
412 pset_ref_unlock(pset);
413}
414
415kern_return_t
416processor_info(
417 processor_t processor,
418 int flavor,
419 host_t *host,
420 processor_info_t info,
421 natural_t *count)
422{
423 int slot_num, state;
424 processor_basic_info_t basic_info;
425
426 if (processor == PROCESSOR_NULL)
427 return KERN_INVALID_ARGUMENT;
428
429 if (flavor != PROCESSOR_BASIC_INFO ||
430 *count < PROCESSOR_BASIC_INFO_COUNT)
431 return KERN_FAILURE;
432
433 basic_info = (processor_basic_info_t) info;
434
435 slot_num = processor->slot_num;
436 basic_info->cpu_type = machine_slot[slot_num].cpu_type;
437 basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
438 state = processor->state;
439 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
440 basic_info->running = FALSE;
441 else
442 basic_info->running = TRUE;
443 basic_info->slot_num = slot_num;
444 if (processor == master_processor)
445 basic_info->is_master = TRUE;
446 else
447 basic_info->is_master = FALSE;
448
449 *count = PROCESSOR_BASIC_INFO_COUNT;
450 *host = &realhost;
451 return KERN_SUCCESS;
452}
453
454kern_return_t processor_start(
455 processor_t processor)
456{
457 if (processor == PROCESSOR_NULL)
458 return KERN_INVALID_ARGUMENT;
459#if NCPUS > 1
460 return cpu_start(processor->slot_num);
461#else /* NCPUS > 1 */
462 return KERN_FAILURE;
463#endif /* NCPUS > 1 */
464}
465
466kern_return_t processor_exit(
467 processor_t processor)
468{
469 if (processor == PROCESSOR_NULL)
470 return KERN_INVALID_ARGUMENT;
471
472#if NCPUS > 1
473 return processor_shutdown(processor);
474#else /* NCPUS > 1 */
475 return KERN_FAILURE;
476#endif /* NCPUS > 1 */
477}
478
479kern_return_t
480processor_control(
481 processor_t processor,
482 processor_info_t info,
483 natural_t count)
484{
485 if (processor == PROCESSOR_NULL)
486 return KERN_INVALID_ARGUMENT;
487
488#if NCPUS > 1
489 return cpu_control(processor->slot_num, (int *)info, count);
490#else /* NCPUS > 1 */
491 return KERN_FAILURE;
492#endif /* NCPUS > 1 */
493}
494
495/*
496 * Precalculate the appropriate system quanta based on load. The
497 * index into machine_quantum is the number of threads on the
498 * processor set queue. It is limited to the number of processors in
499 * the set.
500 */
501
502void quantum_set(
503 processor_set_t pset)
504{
505#if NCPUS > 1
506 int i, ncpus;
507
508 ncpus = pset->processor_count;
509
510 for ( i=1 ; i <= ncpus ; i++) {
511 pset->machine_quantum[i] =
512 ((min_quantum * ncpus) + (i/2)) / i ;
513 }
514 pset->machine_quantum[0] = 2 * pset->machine_quantum[1];
515
516 i = ((pset->runq.count > pset->processor_count) ?
517 pset->processor_count : pset->runq.count);
518 pset->set_quantum = pset->machine_quantum[i];
519#else /* NCPUS > 1 */
520 default_pset.set_quantum = min_quantum;
521#endif /* NCPUS > 1 */
522}
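
For concreteness, here is the formula above with hypothetical numbers, min_quantum = 10 ticks and ncpus = 4. The + (i/2) term makes the integer division round to nearest rather than truncate:

    /* machine_quantum[i] = ((10 * 4) + (i/2)) / i
     *   i = 1 -> 40        i = 2 -> 41/2 = 20
     *   i = 3 -> 41/3 = 13 i = 4 -> 42/4 = 10
     * machine_quantum[0] = 2 * 40 = 80 (an empty run queue gets a long slice)
     * set_quantum = machine_quantum[min(runq.count, processor_count)]
     */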
523
524#if MACH_HOST
525/*
526 * processor_set_create:
527 *
528 * Create and return a new processor set.
529 */
530
531kern_return_t
532processor_set_create(
533 host_t host,
534 processor_set_t *new_set,
535 processor_set_t *new_name)
536{
537 processor_set_t pset;
538
539 if (host == HOST_NULL)
540 return KERN_INVALID_ARGUMENT;
541
542 pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
543 pset_init(pset);
544 pset_reference(pset); /* for new_set out argument */
545 pset_reference(pset); /* for new_name out argument */
546 ipc_pset_init(pset);
547 pset->active = TRUE;
548
549 simple_lock(&all_psets_lock);
550 queue_enter(&all_psets, pset, processor_set_t, all_psets);
551 all_psets_count++;
552 simple_unlock(&all_psets_lock);
553
554 ipc_pset_enable(pset);
555
556 *new_set = pset;
557 *new_name = pset;
558 return KERN_SUCCESS;
559}
560
561/*
562 * processor_set_destroy:
563 *
564 * destroy a processor set. Any tasks, threads or processors
565 * currently assigned to it are reassigned to the default pset.
566 */
567kern_return_t processor_set_destroy(
568 processor_set_t pset)
569{
570 queue_entry_t elem;
571 queue_head_t *list;
572
573 if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
574 return KERN_INVALID_ARGUMENT;
575
576 /*
577 * Handle multiple termination race. First one through sets
578 * active to FALSE and disables ipc access.
579 */
580 pset_lock(pset);
581 if (!(pset->active)) {
582 pset_unlock(pset);
583 return KERN_FAILURE;
584 }
585
586 pset->active = FALSE;
587 ipc_pset_disable(pset);
588
589
590 /*
591 * Now reassign everything in this set to the default set.
592 */
593
594 if (pset->task_count > 0) {
595 list = &pset->tasks;
596 while (!queue_empty(list)) {
597 elem = queue_first(list);
598 task_reference((task_t) elem);
599 pset_unlock(pset);
600 task_assign((task_t) elem, &default_pset, FALSE);
601 task_deallocate((task_t) elem);
602 pset_lock(pset);
603 }
604 }
605
606 if (pset->thread_count > 0) {
607 list = &pset->threads;
608 while (!queue_empty(list)) {
609 elem = queue_first(list);
610 thread_reference((thread_t) elem);
611 pset_unlock(pset);
612 thread_assign((thread_t) elem, &default_pset);
613 thread_deallocate((thread_t) elem);
614 pset_lock(pset);
615 }
616 }
617
618 if (pset->processor_count > 0) {
619 list = &pset->processors;
620 while(!queue_empty(list)) {
621 elem = queue_first(list);
622 pset_unlock(pset);
623 processor_assign((processor_t) elem, &default_pset, TRUE);
624 pset_lock(pset);
625 }
626 }
627
628 pset_unlock(pset);
629
630 /*
631 * Destroy ipc state.
632 */
633 ipc_pset_terminate(pset);
634
635 /*
636 * Deallocate pset's reference to itself.
637 */
638 pset_deallocate(pset);
639 return KERN_SUCCESS;
640}
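
The three loops above share one drain pattern: task_assign, thread_assign, and processor_assign cannot be called with the pset lock held, so each pass pins the first element (the task and thread loops take a reference), drops the lock, reassigns, and relocks; queue_first is re-read every pass because the list shrinks underneath. Condensed for the task case, as a restatement of lines 594-604 rather than new logic:

    while (!queue_empty(&pset->tasks)) {
            task_t t = (task_t) queue_first(&pset->tasks);
            task_reference(t);       /* keep t alive across the unlock */
            pset_unlock(pset);
            task_assign(t, &default_pset, FALSE);
            task_deallocate(t);
            pset_lock(pset);         /* list may have changed; re-read */
    }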
641
642#else /* MACH_HOST */
643
644kern_return_t
645processor_set_create(
646 host_t host,
647 processor_set_t *new_set,
648 processor_set_t *new_name)
649{
650 return KERN_FAILURE;
651}
652
653kern_return_t processor_set_destroy(
654 processor_set_t pset)
655{
656 return KERN_FAILURE;
657}
658
659#endif /* MACH_HOST */
660
661kern_return_t
662processor_get_assignment(
663 processor_t processor,
664 processor_set_t *pset)
665{
666 int state;
667
668 state = processor->state;
669 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
670 return KERN_FAILURE;
671
672 *pset = processor->processor_set;
673 pset_reference(*pset);
674 return KERN_SUCCESS;
675}
676
677kern_return_t
678processor_set_info(
679 processor_set_t pset,
680 int flavor,
681 host_t *host,
682 processor_set_info_t info,
683 natural_t *count)
684{
685 if (pset == PROCESSOR_SET_NULL)
686 return KERN_INVALID_ARGUMENT;
687
688 if (flavor == PROCESSOR_SET_BASIC_INFO) {
689 processor_set_basic_info_t basic_info;
690
691 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
692 return KERN_FAILURE;
693
694 basic_info = (processor_set_basic_info_t) info;
695
696 pset_lock(pset);
697 basic_info->processor_count = pset->processor_count;
698 basic_info->task_count = pset->task_count;
699 basic_info->thread_count = pset->thread_count;
700 basic_info->mach_factor = pset->mach_factor;
701 basic_info->load_average = pset->load_average;
702 pset_unlock(pset);
703
704 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
705 *host = &realhost;
706 return KERN_SUCCESS;
707 }
708 else if (flavor == PROCESSOR_SET_SCHED_INFO) {
709 processor_set_sched_info_t sched_info;
710
711 if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
712 return KERN_FAILURE;
713
714 sched_info = (processor_set_sched_info_t) info;
715
716 pset_lock(pset);
717#if MACH_FIXPRI
718 sched_info->policies = pset->policies;
719#else /* MACH_FIXPRI */
720 sched_info->policies = POLICY_TIMESHARE;
721#endif /* MACH_FIXPRI */
722 sched_info->max_priority = pset->max_priority;
723 pset_unlock(pset);
724
725 *count = PROCESSOR_SET_SCHED_INFO_COUNT;
726 *host = &realhost;
727 return KERN_SUCCESS;
728 }
729
730 *host = HOST_NULL;
731 return KERN_INVALID_ARGUMENT;
732}
733
734/*
735 * processor_set_max_priority:
736 *
737 * Specify max priority permitted on processor set. This affects
738 * newly created and assigned threads. Optionally change existing
739 * ones.
740 */
741kern_return_t
742processor_set_max_priority(
743 processor_set_t pset,
744 int max_priority,
745 boolean_t change_threads)
746{
747 if (pset == PROCESSOR_SET_NULL || invalid_pri(max_priority))
748 return KERN_INVALID_ARGUMENT;
749
750 pset_lock(pset);
751 pset->max_priority = max_priority;
752
753 if (change_threads) {
754 queue_head_t *list;
755 thread_t thread;
756
757 list = &pset->threads;
758 queue_iterate(list, thread, thread_t, pset_threads) {
759 if (thread->max_priority < max_priority)
760 thread_max_priority(thread, pset, max_priority);
761 }
762 }
763
764 pset_unlock(pset);
765
766 return KERN_SUCCESS;
767}
768
769/*
770 * processor_set_policy_enable:
771 *
772 * Allow indicated policy on processor set.
773 */
774
775kern_return_t
776processor_set_policy_enable(
777 processor_set_t pset,
778 int policy)
779{
780 if ((pset == PROCESSOR_SET_NULL) || invalid_policy(policy))
781 return KERN_INVALID_ARGUMENT;
782
783#if MACH_FIXPRI
784 pset_lock(pset);
785 pset->policies |= policy;
786 pset_unlock(pset);
787
788 return KERN_SUCCESS;
789#else /* MACH_FIXPRI */
790 if (policy == POLICY_TIMESHARE)
791 return KERN_SUCCESS;
792 else
793 return KERN_FAILURE;
794#endif /* MACH_FIXPRI */
795}
796
797/*
798 * processor_set_policy_disable:
799 *
800 * Forbid indicated policy on processor set. Time sharing cannot
801 * be forbidden.
802 */
803
804kern_return_t
805processor_set_policy_disable(
806 processor_set_t pset,
807 int policy,
808 boolean_t change_threads)
809{
810 if ((pset == PROCESSOR_SET_NULL) || policy == POLICY_TIMESHARE ||
811 invalid_policy(policy))
812 return KERN_INVALID_ARGUMENT;
813
814#if MACH_FIXPRI
815 pset_lock(pset);
816
817 /*
818 * Check if policy enabled. Disable if so, then handle
819 * change_threads.
820 */
821 if (pset->policies & policy) {
822 pset->policies &= ~policy;
823
824 if (change_threads) {
825 queue_head_t *list;
826 thread_t thread;
827
828 list = &pset->threads;
829 queue_iterate(list, thread, thread_t, pset_threads) {
830 if (thread->policy == policy)
831 thread_policy(thread, POLICY_TIMESHARE, 0);
832 }
833 }
834 }
835 pset_unlock(pset);
836#endif /* MACH_FIXPRI */
837
838 return KERN_SUCCESS;
839}
840
841#define THING_TASK 0
842#define THING_THREAD 1
843
844/*
845 * processor_set_things:
846 *
847 * Common internals for processor_set_{threads,tasks}
848 */
849kern_return_t
850processor_set_things(
851 processor_set_t pset,
852 mach_port_t **thing_list,
853 natural_t *count,
854 int type)
855{
856 unsigned int actual; /* this many things */
857 int i;
858
859 vm_size_t size, size_needed;
860 vm_offset_t addr;
861
862 if (pset == PROCESSOR_SET_NULL)
  [2] Assuming 'pset' is not equal to null
  [3] Taking false branch
863 return KERN_INVALID_ARGUMENT;
864
865 size = 0; addr = 0;
  [4] The value 0 is assigned to 'addr'
866
867 for (;;) {
  [5] Loop condition is true. Entering loop body
868 pset_lock(pset);
869 if (!pset->active) {
  [6] Taking false branch
870 pset_unlock(pset);
871 return KERN_FAILURE;
872 }
873
874 if (type == THING_TASK)
  [7] Taking false branch
875 actual = pset->task_count;
876 else
877 actual = pset->thread_count;
878
879 /* do we have the memory we need? */
880
881 size_needed = actual * sizeof(mach_port_t);
882 if (size_needed <= size)
  [8] Assuming 'size_needed' is <= 'size'
  [9] Taking true branch
883 break;
  [10] Execution continues on line 901
884
885 /* unlock the pset and allocate more memory */
886 pset_unlock(pset);
887
888 if (size != 0)
889 kfree(addr, size);
890
891 assert(size_needed > 0);
892 size = size_needed;
893
894 addr = kalloc(size);
895 if (addr == 0)
896 return KERN_RESOURCE_SHORTAGE;
897 }
898
899 /* OK, have memory and the processor_set is locked & active */
900
901 switch (type) {
  [11] Control jumps to 'case 1:' at line 917
902 case THING_TASK: {
903 task_t *tasks = (task_t *) addr;
904 task_t task;
905
906 for (i = 0, task = (task_t) queue_first(&pset->tasks);
907 i < actual;
908 i++, task = (task_t) queue_next(&task->pset_tasks)) {
909 /* take ref for convert_task_to_port */
910 task_reference(task);
911 tasks[i] = task;
912 }
913 assert(queue_end(&pset->tasks, (queue_entry_t) task));
914 break;
915 }
916
917 case THING_THREAD: {
918 thread_t *threads = (thread_t *) addr;
  [12] Variable 'threads' initialized to a null pointer value
919 thread_t thread;
920
921 for (i = 0, thread = (thread_t) queue_first(&pset->threads);
  [14] Loop condition is true. Entering loop body
922 i < actual;
  [13] Assuming 'i' is < 'actual'
923 i++,
924 thread = (thread_t) queue_next(&thread->pset_threads)) {
925 /* take ref for convert_thread_to_port */
926 thread_reference(thread);
927 threads[i] = thread;
  [15] Array access (from variable 'threads') results in a null pointer dereference
928 }
929 assert(queue_end(&pset->threads, (queue_entry_t) thread));
930 break;
931 }
932 }
933
934 /* can unlock processor set now that we have the task/thread refs */
935 pset_unlock(pset);
936
937 if (actual == 0) {
938 /* no things, so return null pointer and deallocate memory */
939 *thing_list = 0;
940 *count = 0;
941
942 if (size != 0)
943 kfree(addr, size);
944 } else {
945 /* if we allocated too much, must copy */
946
947 if (size_needed < size) {
948 vm_offset_t newaddr;
949
950 newaddr = kalloc(size_needed);
951 if (newaddr == 0) {
952 switch (type) {
953 case THING_TASK: {
954 task_t *tasks = (task_t *) addr;
955
956 for (i = 0; i < actual; i++)
957 task_deallocate(tasks[i]);
958 break;
959 }
960
961 case THING_THREAD: {
962 thread_t *threads = (thread_t *) addr;
963
964 for (i = 0; i < actual; i++)
965 thread_deallocate(threads[i]);
966 break;
967 }
968 }
969 kfree(addr, size);
970 return KERN_RESOURCE_SHORTAGE;
971 }
972
973 memcpy((void *) newaddr, (void *) addr, size_needed);
974 kfree(addr, size);
975 addr = newaddr;
976 }
977
978 *thing_list = (mach_port_t *) addr;
979 *count = actual;
980
981 /* do the conversion that Mig should handle */
982
983 switch (type) {
984 case THING_TASK: {
985 task_t *tasks = (task_t *) addr;
986
987 for (i = 0; i < actual; i++)
988 ((mach_port_t *) tasks)[i] =
989 (mach_port_t)convert_task_to_port(tasks[i]);
990 break;
991 }
992
993 case THING_THREAD: {
994 thread_t *threads = (thread_t *) addr;
995
996 for (i = 0; i < actual; i++)
997 ((mach_port_t *) threads)[i] =
998 (mach_port_t)convert_thread_to_port(threads[i]);
999 break;
1000 }
1001 }
1002 }
1003
1004 return KERN_SUCCESS;
1005}
1006
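processor_set_things() is built around a classic size-under-lock, allocate-outside-lock retry loop, which is why the analyzer has to reason about the addr == 0 case at all. Stripped of the bookkeeping, the shape is roughly as follows (illustrative, with a hypothetical count_things helper):

    for (;;) {
            pset_lock(pset);
            size_needed = count_things(pset) * sizeof(mach_port_t);
            if (size_needed <= size)
                    break;           /* buffer fits; still holding the lock */
            pset_unlock(pset);
            if (size != 0)
                    kfree(addr, size);
            size = size_needed;
            addr = kalloc(size);
            if (addr == 0)
                    return KERN_RESOURCE_SHORTAGE;
    }
    /* copy out under the lock, then pset_unlock(pset) */
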
1007
1008/*
1009 * processor_set_tasks:
1010 *
1011 * List all tasks in the processor set.
1012 */
1013kern_return_t
1014processor_set_tasks(
1015 processor_set_t pset,
1016 task_array_t *task_list,
1017 natural_t *count)
1018{
1019 return processor_set_things(pset, task_list, count, THING_TASK);
1020}
1021
1022/*
1023 * processor_set_threads:
1024 *
1025 * List all threads in the processor set.
1026 */
1027kern_return_t
1028processor_set_threads(
1029 processor_set_t pset,
1030 thread_array_t *thread_list,
1031 natural_t *count)
1032{
1033 return processor_set_things(pset, thread_list, count, THING_THREAD);
  [1] Calling 'processor_set_things'
1034}