Bug Summary

File: obj/../kern/processor.c
Location: line 933, column 4
Description: Array access (from variable 'threads') results in a null pointer dereference
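
The path the analyzer reports (events [1]-[15] in the listing below) reaches
processor_set_things with size == 0 and addr == 0, assumes size_needed <= size in
the sizing loop (which, absent overflow in actual * sizeof(mach_port_t), forces
actual == 0), and then separately assumes i < actual in the copy loop at line 928,
so the NULL 'threads' pointer derived from addr is written through at line 933. The
two assumptions can hold together only if the multiplication wraps, so this is
likely a false positive on realistic inputs, but the store through NULL is real on
the path as modeled. A minimal standalone illustration of the flagged pattern
(hypothetical code, not from the kernel):

    #include <stddef.h>

    void pattern(unsigned actual)
    {
        size_t size = 0;
        long *buf = NULL;                     /* plays the role of 'addr' */

        for (;;) {
            size_t size_needed = actual * sizeof(long);
            if (size_needed <= size)          /* true when actual == 0 ... */
                break;                        /* ... so buf is still NULL */
            /* allocation path omitted */
        }

        for (unsigned i = 0; i < actual; i++) /* ... yet assumed > 0 here */
            buf[i] = 0;                       /* the reported NULL store */
    }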

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993-1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * processor.c: processor and processor_set manipulation routines.
28 */
29
30#include <string.h>
31
32#include <mach/boolean.h>
33#include <mach/policy.h>
34#include <mach/processor_info.h>
35#include <mach/vm_param.h>
36#include <kern/cpu_number.h>
37#include <kern/debug.h>
38#include <kern/kalloc.h>
39#include <kern/lock.h>
40#include <kern/host.h>
41#include <kern/ipc_tt.h>
42#include <kern/processor.h>
43#include <kern/sched.h>
44#include <kern/task.h>
45#include <kern/thread.h>
46#include <kern/ipc_host.h>
47#include <ipc/ipc_port.h>
48
49#if MACH_HOST
50#include <kern/slab.h>
51struct kmem_cache pset_cache;
52#endif /* MACH_HOST */
53
54
55/*
56 * Exported variables.
57 */
58struct processor_set default_pset;
59struct processor processor_array[NCPUS];
60
61queue_head_t all_psets;
62int all_psets_count;
63decl_simple_lock_data(, all_psets_lock);
64
65processor_t master_processor;
66processor_t processor_ptr[NCPUS];
67
68/*
69 * Forward declarations.
70 */
71void quantum_set(processor_set_t);
72void pset_init(processor_set_t);
73void processor_init(processor_t, int);
74
75/*
76 * Bootstrap the processor/pset system so the scheduler can run.
77 */
78void pset_sys_bootstrap(void)
79{
80 register int i;
81
82 pset_init(&default_pset);
83 default_pset.empty = FALSE;
84 for (i = 0; i < NCPUS; i++) {
85 /*
86 * Initialize processor data structures.
87 * Note that cpu_to_processor(i) is processor_ptr[i].
88 */
89 processor_ptr[i] = &processor_array[i];
90 processor_init(processor_ptr[i], i);
91 }
92 master_processor = cpu_to_processor(master_cpu);
93 queue_init(&all_psets);
94 simple_lock_init(&all_psets_lock);
95 queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
96 all_psets_count = 1;
97 default_pset.active = TRUE;
98 default_pset.empty = FALSE;
99
100 /*
101 * Note: the default_pset has a max_priority of BASEPRI_USER.
102 * Internal kernel threads override this in kernel_thread.
103 */
104}
105
106#if MACH_HOST
107/*
108 * Rest of pset system initializations.
109 */
110void pset_sys_init(void)
111{
112 register int i;
113 register processor_t processor;
114
115 /*
116 * Allocate the cache for processor sets.
117 */
118 kmem_cache_init(&pset_cache, "processor_set",
119 sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
120
121 /*
122 * Give each processor a control port.
123 * The master processor already has one.
124 */
125 for (i = 0; i < NCPUS; i++) {
126 processor = cpu_to_processor(i);
127 if (processor != master_processor &&
128 machine_slot[i].is_cpu)
129 {
130 ipc_processor_init(processor);
131 }
132 }
133}
134#endif /* MACH_HOST */
135
136/*
137 * Initialize the given processor_set structure.
138 */
139
140void pset_init(
141 register processor_set_t pset)
142{
143 int i;
144
145 simple_lock_init(&pset->runq.lock);
146 pset->runq.low = 0;
147 pset->runq.count = 0;
148 for (i = 0; i < NRQS; i++) {
149 queue_init(&(pset->runq.runq[i]));
150 }
151 queue_init(&pset->idle_queue);
152 pset->idle_count = 0;
153 simple_lock_init(&pset->idle_lock);
154 queue_init(&pset->processors);
155 pset->processor_count = 0;
156 pset->empty = TRUE;
157 queue_init(&pset->tasks);
158 pset->task_count = 0;
159 queue_init(&pset->threads);
160 pset->thread_count = 0;
161 pset->ref_count = 1;
162 simple_lock_init(&pset->ref_lock);
163 queue_init(&pset->all_psets);
164 pset->active = FALSE;
165 simple_lock_init(&pset->lock);
166 pset->pset_self = IP_NULL;
167 pset->pset_name_self = IP_NULL;
168 pset->max_priority = BASEPRI_USER;
169#if MACH_FIXPRI
170 pset->policies = POLICY_TIMESHARE;
171#endif /* MACH_FIXPRI */
172 pset->set_quantum = min_quantum;
173#if NCPUS > 1
174 pset->quantum_adj_index = 0;
175 simple_lock_init(&pset->quantum_adj_lock);
176
177 for (i = 0; i <= NCPUS; i++) {
178 pset->machine_quantum[i] = min_quantum;
179 }
180#endif /* NCPUS > 1 */
181 pset->mach_factor = 0;
182 pset->load_average = 0;
183 pset->sched_load = SCHED_SCALE; /* i.e. 1 */
184}
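
The queue_* calls used throughout this file are Mach's intrusive circular doubly
linked list macros. A distilled sketch of what they expand to (simplified: the real
macros also take the element type and the name of the embedded link field, so one
element can sit on several queues at once; here the entry itself is the element):

    typedef struct queue_entry {
        struct queue_entry *next;
        struct queue_entry *prev;
    } queue_entry, *queue_entry_t, queue_head_t;

    /* Empty queue: the head points at itself. */
    void example_queue_init(queue_head_t *q)
    {
        q->next = q->prev = q;
    }

    int example_queue_empty(queue_head_t *q)
    {
        return q->next == q;
    }

    /* Append at the tail by splicing in front of the head. */
    void example_queue_enter(queue_head_t *q, queue_entry_t e)
    {
        e->prev = q->prev;
        e->next = q;
        q->prev->next = e;
        q->prev = e;
    }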
185
186/*
187 * Initialize the given processor structure for the processor in
188 * the slot specified by slot_num.
189 */
190
191void processor_init(
192 register processor_t pr,
193 int slot_num)
194{
195 int i;
196
197 simple_lock_init(&pr->runq.lock);
198 pr->runq.low = 0;
199 pr->runq.count = 0;
200 for (i = 0; i < NRQS; i++) {
201 queue_init(&(pr->runq.runq[i]));
202 }
203 queue_init(&pr->processor_queue);
204 pr->state = PROCESSOR_OFF_LINE;
205 pr->next_thread = THREAD_NULL;
206 pr->idle_thread = THREAD_NULL;
207 pr->quantum = 0;
208 pr->first_quantum = FALSE;
209 pr->last_quantum = 0;
210 pr->processor_set = PROCESSOR_SET_NULL;
211 pr->processor_set_next = PROCESSOR_SET_NULL;
212 queue_init(&pr->processors);
213 simple_lock_init(&pr->lock);
214 pr->processor_self = IP_NULL;
215 pr->slot_num = slot_num;
216}
217
218/*
219 * pset_remove_processor() removes a processor from a processor_set.
220 * It can only be called on the current processor. Caller must
221 * hold lock on current processor and processor set.
222 */
223
224void pset_remove_processor(
225 processor_set_t pset,
226 processor_t processor)
227{
228 if (pset != processor->processor_set)
229 panic("pset_remove_processor: wrong pset");
230
231 queue_remove(&pset->processors, processor, processor_t, processors);
232 processor->processor_set = PROCESSOR_SET_NULL;
233 pset->processor_count--;
234 quantum_set(pset);
235}
236
237/*
238 * pset_add_processor() adds a processor to a processor_set.
239 * It can only be called on the current processor. Caller must
240 * hold lock on current processor and on pset. No reference counting on
241 * processors. Processor reference to pset is implicit.
242 */
243
244void pset_add_processor(
245 processor_set_t pset,
246 processor_t processor)
247{
248 queue_enter(&pset->processors, processor, processor_t, processors);
249 processor->processor_set = pset;
250 pset->processor_count++;
251 quantum_set(pset);
252}
253
254/*
255 * pset_remove_task() removes a task from a processor_set.
256 * Caller must hold locks on pset and task. Pset reference count
257 * is not decremented; caller must explicitly pset_deallocate.
258 */
259
260void pset_remove_task(
261 processor_set_t pset,
262 task_t task)
263{
264 if (pset != task->processor_set)
265 return;
266
267 queue_remove(&pset->tasks, task, task_t, pset_tasks);
268 task->processor_set = PROCESSOR_SET_NULL;
269 pset->task_count--;
270}
271
272/*
273 * pset_add_task() adds a task to a processor_set.
274 * Caller must hold locks on pset and task. Pset references to
275 * tasks are implicit.
276 */
277
278void pset_add_task(
279 processor_set_t pset,
280 task_t task)
281{
282 queue_enter(&pset->tasks, task, task_t, pset_tasks);
283 task->processor_set = pset;
284 pset->task_count++;
285}
286
287/*
288 * pset_remove_thread() removes a thread from a processor_set.
289 * Caller must hold locks on pset and thread. Pset reference count
290 * is not decremented; caller must explicitly pset_deallocate.
291 */
292
293void pset_remove_thread(
294 processor_set_t pset,
295 thread_t thread)
296{
297 queue_remove(&pset->threads, thread, thread_t, pset_threads);
298 thread->processor_set = PROCESSOR_SET_NULL;
299 pset->thread_count--;
300}
301
302/*
303 * pset_add_thread() adds a thread to a processor_set.
304 * Caller must hold locks on pset and thread. Pset references to
305 * threads are implicit.
306 */
307
308void pset_add_thread(
309 processor_set_t pset,
310 thread_t thread)
311{
312 queue_enter(&pset->threads, thread, thread_t, pset_threads);
313 thread->processor_set = pset;
314 pset->thread_count++;
315}
316
317/*
318 * thread_change_psets() changes the pset of a thread. Caller must
319 * hold locks on both psets and thread. The old pset must be
320 * explicitly pset_deallocate()'ed by caller.
321 */
322
323void thread_change_psets(
324 thread_t thread,
325 processor_set_t old_pset,
326 processor_set_t new_pset)
327{
328 queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
329 old_pset->thread_count--;
330 queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
331 thread->processor_set = new_pset;
332 new_pset->thread_count++;
333}
334
335/*
336 * pset_deallocate:
337 *
338 * Remove one reference to the processor set. Destroy processor_set
339 * if this was the last reference.
340 */
341void pset_deallocate(
342 processor_set_t pset)
343{
344 if (pset == PROCESSOR_SET_NULL)
345 return;
346
347 pset_ref_lock(pset);
348 if (--pset->ref_count > 0) {
349 pset_ref_unlock(pset);
350 return;
351 }
352#if !MACH_HOST
353 panic("pset_deallocate: default_pset destroyed");
354#endif /* !MACH_HOST */
355
356#if MACH_HOST
357 /*
358 * Reference count is zero, however the all_psets list
359 * holds an implicit reference and may make new ones.
360 * Its lock also dominates the pset lock. To check for this,
361 * temporarily restore one reference, and then lock the
362 * other structures in the right order.
363 */
364 pset->ref_count = 1;
365 pset_ref_unlock(pset);
366
367 simple_lock(&all_psets_lock);
368 pset_ref_lock(pset);
369 if (--pset->ref_count > 0) {
370 /*
371 * Made an extra reference.
372 */
373 pset_ref_unlock(pset);
374 simple_unlock(&all_psets_lock);
375 return;
376 }
377
378 /*
379 * Ok to destroy pset. Make a few paranoia checks.
380 */
381
382 if ((pset == &default_pset) || (pset->thread_count > 0) ||
383 (pset->task_count > 0) || pset->processor_count > 0) {
384 panic("pset_deallocate: destroy default or active pset");
385 }
386 /*
387 * Remove from all_psets queue.
388 */
389 queue_remove(&all_psets, pset, processor_set_t, all_psets);
390 all_psets_count--;
391
392 pset_ref_unlock(pset);
393 simple_unlock(&all_psets_lock);
394
395 /*
396 * That's it, free data structure.
397 */
398 kmem_cache_free(&pset_cache, (vm_offset_t)pset);
399#endif /* MACH_HOST */
400}
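
The comment at listing lines 357-363 is the heart of this function: the reference
count can reach zero while the all_psets list, whose lock dominates the pset locks,
is still able to hand out new references, so the count is temporarily resurrected
and the locks retaken in dominance order before re-checking. A self-contained sketch
of the same idiom using pthread locks (hypothetical types and names, not Mach code):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    struct obj {
        pthread_mutex_t ref_lock;
        int ref_count;            /* the global list holds one implicit ref */
    };

    void obj_release(struct obj *o)
    {
        pthread_mutex_lock(&o->ref_lock);
        if (--o->ref_count > 0) {
            pthread_mutex_unlock(&o->ref_lock);
            return;
        }

        /* Count hit zero, but the list can still mint references.
         * Resurrect one, then retake locks in dominance order. */
        o->ref_count = 1;
        pthread_mutex_unlock(&o->ref_lock);

        pthread_mutex_lock(&list_lock);
        pthread_mutex_lock(&o->ref_lock);
        if (--o->ref_count > 0) {
            /* Raced with a new reference; leave the object alive. */
            pthread_mutex_unlock(&o->ref_lock);
            pthread_mutex_unlock(&list_lock);
            return;
        }
        /* ... unlink from the list here, as queue_remove does above ... */
        pthread_mutex_unlock(&o->ref_lock);
        pthread_mutex_unlock(&list_lock);
        free(o);
    }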
401
402/*
403 * pset_reference:
404 *
405 * Add one reference to the processor set.
406 */
407void pset_reference(
408 processor_set_t pset)
409{
410 pset_ref_lock(pset);
411 pset->ref_count++;
412 pset_ref_unlock(pset);
413}
414
415kern_return_t
416processor_info(
417 register processor_t processor,
418 int flavor,
419 host_t *host,
420 processor_info_t info,
421 natural_t *count)
422{
423 register int slot_num, state;
424 register processor_basic_info_t basic_info;
425
426 if (processor == PROCESSOR_NULL)
427 return KERN_INVALID_ARGUMENT;
428
429 if (flavor != PROCESSOR_BASIC_INFO ||
430 *count < PROCESSOR_BASIC_INFO_COUNT)
431 return KERN_FAILURE;
432
433 basic_info = (processor_basic_info_t) info;
434
435 slot_num = processor->slot_num;
436 basic_info->cpu_type = machine_slot[slot_num].cpu_type;
437 basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
438 state = processor->state;
439 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
440 basic_info->running = FALSE;
441 else
442 basic_info->running = TRUE;
443 basic_info->slot_num = slot_num;
444 if (processor == master_processor)
445 basic_info->is_master = TRUE;
446 else
447 basic_info->is_master = FALSE;
448
449 *count = PROCESSOR_BASIC_INFO_COUNT;
450 *host = &realhost;
451 return KERN_SUCCESS;
452}
453
454kern_return_t processor_start(
455 processor_t processor)
456{
457 if (processor == PROCESSOR_NULL)
458 return KERN_INVALID_ARGUMENT;
459#if NCPUS > 1
460 return cpu_start(processor->slot_num);
461#else /* NCPUS > 1 */
462 return KERN_FAILURE;
463#endif /* NCPUS > 1 */
464}
465
466kern_return_t processor_exit(
467 processor_t processor)
468{
469 if (processor == PROCESSOR_NULL)
470 return KERN_INVALID_ARGUMENT;
471
472#if NCPUS > 1
473 return processor_shutdown(processor);
474#else /* NCPUS > 1 */
475 return KERN_FAILURE;
476#endif /* NCPUS > 1 */
477}
478
479kern_return_t
480processor_control(
481 processor_t processor,
482 processor_info_t info,
483 natural_t count)
484{
485 if (processor == PROCESSOR_NULL)
486 return KERN_INVALID_ARGUMENT;
487
488#if NCPUS > 1
489 return cpu_control(processor->slot_num, (int *)info, count);
490#else /* NCPUS > 1 */
491 return KERN_FAILURE;
492#endif /* NCPUS > 1 */
493}
494
495/*
496 * Precalculate the appropriate system quanta based on load. The
497 * index into machine_quantum is the number of threads on the
498 * processor set queue. It is limited to the number of processors in
499 * the set.
500 */
501
502void quantum_set(
503 processor_set_t pset)
504{
505#if NCPUS > 1
506 register int i,ncpus;
507
508 ncpus = pset->processor_count;
509
510 for ( i=1 ; i <= ncpus ; i++) {
511 pset->machine_quantum[i] =
512 ((min_quantum * ncpus) + (i/2)) / i ;
513 }
514 pset->machine_quantum[0] = 2 * pset->machine_quantum[1];
515
516 i = ((pset->runq.count > pset->processor_count) ?
517 pset->processor_count : pset->runq.count);
518 pset->set_quantum = pset->machine_quantum[i];
519#else /* NCPUS > 1 */
520 default_pset.set_quantum = min_quantum;
521#endif /* NCPUS > 1 */
522}
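
For a feel of the numbers: assuming min_quantum is 10 ticks (an illustrative value)
and a set of 3 processors, min_quantum * ncpus = 30, so machine_quantum[1] =
(30 + 0)/1 = 30, machine_quantum[2] = (30 + 1)/2 = 15, machine_quantum[3] =
(30 + 1)/3 = 10, and machine_quantum[0] = 2 * 30 = 60. With two runnable threads,
set_quantum = machine_quantum[2] = 15: the quantum stretches while the set is
underloaded and settles at min_quantum once every processor has a thread.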
523
524#if MACH_HOST
525/*
526 * processor_set_create:
527 *
528 * Create and return a new processor set.
529 */
530
531kern_return_t
532processor_set_create(
533 host_t host,
534 processor_set_t *new_set,
535 processor_set_t *new_name)
536{
537 processor_set_t pset;
538
539 if (host == HOST_NULL)
540 return KERN_INVALID_ARGUMENT;
541
542 pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
543 pset_init(pset);
544 pset_reference(pset); /* for new_set out argument */
545 pset_reference(pset); /* for new_name out argument */
546 ipc_pset_init(pset);
547 pset->active = TRUE;
548
549 simple_lock(&all_psets_lock);
550 queue_enter(&all_psets, pset, processor_set_t, all_psets);
551 all_psets_count++;
552 simple_unlock(&all_psets_lock);
553
554 ipc_pset_enable(pset);
555
556 *new_set = pset;
557 *new_name = pset;
558 return KERN_SUCCESS;
559}
560
561/*
562 * processor_set_destroy:
563 *
564 * destroy a processor set. Any tasks, threads or processors
565 * currently assigned to it are reassigned to the default pset.
566 */
567kern_return_t processor_set_destroy(
568 processor_set_t pset)
569{
570 register queue_entry_t elem;
571 register queue_head_t *list;
572
573 if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
574 return KERN_INVALID_ARGUMENT;
575
576 /*
577 * Handle multiple termination race. First one through sets
578 * active to FALSE and disables ipc access.
579 */
580 pset_lock(pset);
581 if (!(pset->active)) {
582 pset_unlock(pset);
583 return KERN_FAILURE;
584 }
585
586 pset->active = FALSE;
587 ipc_pset_disable(pset);
588
589
590 /*
591 * Now reassign everything in this set to the default set.
592 */
593
594 if (pset->task_count > 0) {
595 list = &pset->tasks;
596 while (!queue_empty(list)) {
597 elem = queue_first(list);
598 task_reference((task_t) elem);
599 pset_unlock(pset);
600 task_assign((task_t) elem, &default_pset, FALSE);
601 task_deallocate((task_t) elem);
602 pset_lock(pset);
603 }
604 }
605
606 if (pset->thread_count > 0) {
607 list = &pset->threads;
608 while (!queue_empty(list)) {
609 elem = queue_first(list);
610 thread_reference((thread_t) elem);
611 pset_unlock(pset);
612 thread_assign((thread_t) elem, &default_pset);
613 thread_deallocate((thread_t) elem);
614 pset_lock(pset);
615 }
616 }
617
618 if (pset->processor_count > 0) {
619 list = &pset->processors;
620 while(!queue_empty(list)) {
621 elem = queue_first(list);
622 pset_unlock(pset);
623 processor_assign((processor_t) elem, &default_pset, TRUE);
624 pset_lock(pset);
625 }
626 }
627
628 pset_unlock(pset);
629
630 /*
631 * Destroy ipc state.
632 */
633 ipc_pset_terminate(pset);
634
635 /*
636 * Deallocate pset's reference to itself.
637 */
638 pset_deallocate(pset);
639 return KERN_SUCCESS;
640}
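
The three drain loops above share one shape: the pset lock cannot be held across
task_assign/thread_assign/processor_assign (each takes other locks), so every pass
takes the head element, drops the lock, reassigns, then relocks and re-reads the
head, because the list shrank in the meantime. The task and thread passes also pin
the element with a reference across the unlock (processors are not reference
counted, per the comment at listing lines 240-241, so that pass skips the pin). A
compile-only sketch of the shape, with hypothetical stand-in types (not Mach code):

    #include <stddef.h>

    struct elem;
    struct set { struct elem *head; };

    extern void set_lock(struct set *s);
    extern void set_unlock(struct set *s);
    extern void elem_reference(struct elem *e);              /* pin across unlock */
    extern void elem_deallocate(struct elem *e);
    extern void elem_assign(struct elem *e, struct set *to); /* unlinks e */

    void drain(struct set *s, struct set *fallback)
    {
        set_lock(s);
        while (s->head != NULL) {
            struct elem *e = s->head;
            elem_reference(e);        /* keep e alive while s is unlocked */
            set_unlock(s);
            elem_assign(e, fallback); /* removes e from s's list */
            elem_deallocate(e);
            set_lock(s);              /* head has changed; re-read it */
        }
        set_unlock(s);
    }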
641
642#else /* MACH_HOST */
643
644kern_return_t
645processor_set_create(
646 host_t host,
647 processor_set_t *new_set,
648 processor_set_t *new_name)
649{
650#ifdef lint
651 host++; new_set++; new_name++;
652#endif /* lint */
653 return KERN_FAILURE;
654}
655
656kern_return_t processor_set_destroy(
657 processor_set_t pset)
658{
659#ifdef lint
660 pset++;
661#endif /* lint */
662 return KERN_FAILURE;
663}
664
665#endif /* MACH_HOST */
666
667kern_return_t
668processor_get_assignment(
669 processor_t processor,
670 processor_set_t *pset)
671{
672 int state;
673
674 state = processor->state;
675 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
676 return KERN_FAILURE;
677
678 *pset = processor->processor_set;
679 pset_reference(*pset);
680 return KERN_SUCCESS;
681}
682
683kern_return_t
684processor_set_info(
685 processor_set_t pset,
686 int flavor,
687 host_t *host,
688 processor_set_info_t info,
689 natural_t *count)
690{
691 if (pset == PROCESSOR_SET_NULL)
692 return KERN_INVALID_ARGUMENT;
693
694 if (flavor == PROCESSOR_SET_BASIC_INFO) {
695 register processor_set_basic_info_t basic_info;
696
697 if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
698 return KERN_FAILURE;
699
700 basic_info = (processor_set_basic_info_t) info;
701
702 pset_lock(pset);
703 basic_info->processor_count = pset->processor_count;
704 basic_info->task_count = pset->task_count;
705 basic_info->thread_count = pset->thread_count;
706 basic_info->mach_factor = pset->mach_factor;
707 basic_info->load_average = pset->load_average;
708 pset_unlock(pset);
709
710 *count = PROCESSOR_SET_BASIC_INFO_COUNT;
711 *host = &realhost;
712 return KERN_SUCCESS;
713 }
714 else if (flavor == PROCESSOR_SET_SCHED_INFO) {
715 register processor_set_sched_info_t sched_info;
716
717 if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
718 return KERN_FAILURE;
719
720 sched_info = (processor_set_sched_info_t) info;
721
722 pset_lock(pset);
723#if MACH_FIXPRI
724 sched_info->policies = pset->policies;
725#else /* MACH_FIXPRI */
726 sched_info->policies = POLICY_TIMESHARE;
727#endif /* MACH_FIXPRI */
728 sched_info->max_priority = pset->max_priority;
729 pset_unlock(pset);
730
731 *count = PROCESSOR_SET_SCHED_INFO_COUNT;
732 *host = &realhost;
733 return KERN_SUCCESS;
734 }
735
736 *host = HOST_NULL;
737 return KERN_INVALID_ARGUMENT;
738}
739
740/*
741 * processor_set_max_priority:
742 *
743 * Specify max priority permitted on processor set. This affects
744 * newly created and assigned threads. Optionally change existing
745 * ones.
746 */
747kern_return_t
748processor_set_max_priority(
749 processor_set_t pset,
750 int max_priority,
751 boolean_t change_threads)
752{
753 if (pset == PROCESSOR_SET_NULL || invalid_pri(max_priority))
754 return KERN_INVALID_ARGUMENT;
755
756 pset_lock(pset);
757 pset->max_priority = max_priority;
758
759 if (change_threads) {
760 register queue_head_t *list;
761 register thread_t thread;
762
763 list = &pset->threads;
764 queue_iterate(list, thread, thread_t, pset_threads) {
765 if (thread->max_priority < max_priority)
766 thread_max_priority(thread, pset, max_priority);
767 }
768 }
769
770 pset_unlock(pset);
771
772 return KERN_SUCCESS;
773}
774
775/*
776 * processor_set_policy_enable:
777 *
778 * Allow indicated policy on processor set.
779 */
780
781kern_return_t
782processor_set_policy_enable(
783 processor_set_t pset,
784 int policy)
785{
786 if ((pset == PROCESSOR_SET_NULL) || invalid_policy(policy))
787 return KERN_INVALID_ARGUMENT;
788
789#if MACH_FIXPRI
790 pset_lock(pset);
791 pset->policies |= policy;
792 pset_unlock(pset);
793
794 return KERN_SUCCESS;
795#else /* MACH_FIXPRI */
796 if (policy == POLICY_TIMESHARE)
797 return KERN_SUCCESS;
798 else
799 return KERN_FAILURE;
800#endif /* MACH_FIXPRI */
801}
802
803/*
804 * processor_set_policy_disable:
805 *
806 * Forbid indicated policy on processor set. Time sharing cannot
807 * be forbidden.
808 */
809
810kern_return_t
811processor_set_policy_disable(
812 processor_set_t pset,
813 int policy,
814 boolean_t change_threads)
815{
816 if ((pset == PROCESSOR_SET_NULL) || policy == POLICY_TIMESHARE ||
817 invalid_policy(policy))
818 return KERN_INVALID_ARGUMENT;
819
820#if MACH_FIXPRI
821 pset_lock(pset);
822
823 /*
824 * Check if policy enabled. Disable if so, then handle
825 * change_threads.
826 */
827 if (pset->policies & policy) {
828 pset->policies &= ~policy;
829
830 if (change_threads) {
831 register queue_head_t *list;
832 register thread_t thread;
833
834 list = &pset->threads;
835 queue_iterate(list, thread, thread_t, pset_threads) {
836 if (thread->policy == policy)
837 thread_policy(thread, POLICY_TIMESHARE, 0);
838 }
839 }
840 }
841 pset_unlock(pset);
842#endif /* MACH_FIXPRI */
843
844 return KERN_SUCCESS;
845}
846
847#define THING_TASK 0
848#define THING_THREAD 1
849
850/*
851 * processor_set_things:
852 *
853 * Common internals for processor_set_{threads,tasks}
854 */
855kern_return_t
856processor_set_things(
857 processor_set_t pset,
858 mach_port_t **thing_list,
859 natural_t *count,
860 int type)
861{
862 unsigned int actual; /* this many things */
863 int i;
864
865 vm_size_t size, size_needed;
866 vm_offset_t addr;
867
868 if (pset == PROCESSOR_SET_NULL)
    [2] Assuming 'pset' is not equal to null
    [3] Taking false branch
869 return KERN_INVALID_ARGUMENT;
870
871 size = 0; addr = 0;
    [4] The value 0 is assigned to 'addr'
872
873 for (;;) {
    [5] Loop condition is true. Entering loop body
874 pset_lock(pset);
875 if (!pset->active) {
    [6] Taking false branch
876 pset_unlock(pset);
877 return KERN_FAILURE5;
878 }
879
880 if (type == THING_TASK)
    [7] Taking false branch
881 actual = pset->task_count;
882 else
883 actual = pset->thread_count;
884
885 /* do we have the memory we need? */
886
887 size_needed = actual * sizeof(mach_port_t);
888 if (size_needed <= size)
    [8] Assuming 'size_needed' is <= 'size'
    [9] Taking true branch
889 break;
    [10] Execution continues on line 907
890
891 /* unlock the pset and allocate more memory */
892 pset_unlock(pset);
893
894 if (size != 0)
895 kfree(addr, size);
896
897 assert(size_needed > 0);
898 size = size_needed;
899
900 addr = kalloc(size);
901 if (addr == 0)
902 return KERN_RESOURCE_SHORTAGE;
903 }
904
905 /* OK, have memory and the processor_set is locked & active */
906
907 switch (type) {
    [11] Control jumps to 'case 1:' at line 923
908 case THING_TASK: {
909 task_t *tasks = (task_t *) addr;
910 task_t task;
911
912 for (i = 0, task = (task_t) queue_first(&pset->tasks);
913 i < actual;
914 i++, task = (task_t) queue_next(&task->pset_tasks)) {
915 /* take ref for convert_task_to_port */
916 task_reference(task);
917 tasks[i] = task;
918 }
919 assert(queue_end(&pset->tasks, (queue_entry_t) task));
920 break;
921 }
922
923 case THING_THREAD: {
924 thread_t *threads = (thread_t *) addr;
    [12] Variable 'threads' initialized to a null pointer value
925 thread_t thread;
926
927 for (i = 0, thread = (thread_t) queue_first(&pset->threads);
    [14] Loop condition is true. Entering loop body
928 i < actual;
    [13] Assuming 'i' is < 'actual'
929 i++,
930 thread = (thread_t) queue_next(&thread->pset_threads)) {
931 /* take ref for convert_thread_to_port */
932 thread_reference(thread);
933 threads[i] = thread;
    [15] Array access (from variable 'threads') results in a null pointer dereference
934 }
935 assert(queue_end(&pset->threads, (queue_entry_t) thread));
936 break;
937 }
938 }
939
940 /* can unlock processor set now that we have the task/thread refs */
941 pset_unlock(pset);
942
943 if (actual == 0) {
944 /* no things, so return null pointer and deallocate memory */
945 *thing_list = 0;
946 *count = 0;
947
948 if (size != 0)
949 kfree(addr, size);
950 } else {
951 /* if we allocated too much, must copy */
952
953 if (size_needed < size) {
954 vm_offset_t newaddr;
955
956 newaddr = kalloc(size_needed);
957 if (newaddr == 0) {
958 switch (type) {
959 case THING_TASK: {
960 task_t *tasks = (task_t *) addr;
961
962 for (i = 0; i < actual; i++)
963 task_deallocate(tasks[i]);
964 break;
965 }
966
967 case THING_THREAD: {
968 thread_t *threads = (thread_t *) addr;
969
970 for (i = 0; i < actual; i++)
971 thread_deallocate(threads[i]);
972 break;
973 }
974 }
975 kfree(addr, size);
976 return KERN_RESOURCE_SHORTAGE;
977 }
978
979 memcpy((void *) newaddr, (void *) addr, size_needed);
980 kfree(addr, size);
981 addr = newaddr;
982 }
983
984 *thing_list = (mach_port_t *) addr;
985 *count = actual;
986
987 /* do the conversion that Mig should handle */
988
989 switch (type) {
990 case THING_TASK: {
991 task_t *tasks = (task_t *) addr;
992
993 for (i = 0; i < actual; i++)
994 ((mach_port_t *) tasks)[i] =
995 (mach_port_t)convert_task_to_port(tasks[i]);
996 break;
997 }
998
999 case THING_THREAD: {
1000 thread_t *threads = (thread_t *) addr;
1001
1002 for (i = 0; i < actual; i++)
1003 ((mach_port_t *) threads)[i] =
1004 (mach_port_t)convert_thread_to_port(threads[i]);
1005 break;
1006 }
1007 }
1008 }
1009
1010 return KERN_SUCCESS;
1011}
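
If the report were to be addressed in the source rather than suppressed, one option
is to handle the empty-set case before the switch, so that 'addr' is provably
nonzero wherever it is indexed. A sketch of such a guard (an editorial suggestion,
not an upstream patch), placed after the sizing loop around line 905:

    /* An empty set needs no copying; returning here makes addr != 0 an
     * invariant of both copy loops below. */
    if (actual == 0) {
        pset_unlock(pset);
        *thing_list = 0;
        *count = 0;
        if (size != 0)
            kfree(addr, size);
        return KERN_SUCCESS;
    }
    assert(addr != 0);  /* actual > 0 implies the kalloc() above succeeded */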
1012
1013
1014/*
1015 * processor_set_tasks:
1016 *
1017 * List all tasks in the processor set.
1018 */
1019kern_return_t
1020processor_set_tasks(
1021 processor_set_t pset,
1022 task_array_t *task_list,
1023 natural_t *count)
1024{
1025 return processor_set_things(pset, task_list, count, THING_TASK);
1026}
1027
1028/*
1029 * processor_set_threads:
1030 *
1031 * List all threads in the processor set.
1032 */
1033kern_return_t
1034processor_set_threads(
1035 processor_set_t pset,
1036 thread_array_t *thread_list,
1037 natural_t *count)
1038{
1039 return processor_set_things(pset, thread_list, count, THING_THREAD);
    [1] Calling 'processor_set_things'
1040}