Bug Summary

File: obj-scan-build/mach-defpager/../../mach-defpager/kalloc.c
Location: line 254, column 6
Description: Function call argument is an uninitialized value
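
The analyzer's path is marked inline in the listing below as [1]-[3]. In kfree(), fl is assigned only inside the if (size <= kalloc_max) block at line 244. On the reported path the false branch is taken there, leaving fl uninitialized, and the true branch is then taken at if (freesize <= kalloc_max) on line 253, so pthread_spin_lock(&fl->lock) at line 254 receives an uninitialized pointer. With a single consistent value of kalloc_max this path looks infeasible (on the false branch, freesize == size > kalloc_max), so the report may be a false positive; initializing fl would nevertheless silence it (see the sketch after the listing).

For reference, the snippet below is a minimal, hypothetical distillation of the flagged pattern, not code from kalloc.c; limit_a, limit_b, and some_lock are invented names, and two independent bounds are used so the problematic path is clearly reachable:

    #include <pthread.h>

    static pthread_spinlock_t some_lock;    /* illustrative only */

    void pattern(unsigned long size, unsigned long limit_a,
                 unsigned long limit_b)
    {
        pthread_spinlock_t *fl;     /* no initializer, like fl in kfree() */

        if (size <= limit_a)
            fl = &some_lock;        /* the only assignment to fl */

        if (size <= limit_b)        /* can hold even if the first test failed */
            pthread_spin_lock(fl);  /* uninitialized whenever
                                       limit_a < size <= limit_b */
    }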

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993-1987 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: kern/kalloc.c
28 * Author: Avadis Tevanian, Jr.
29 * Date: 1985
30 *
31 * General kernel memory allocator. This allocator is designed
32 * to be used by the kernel to manage dynamic memory fast.
33 */
34
35#include <mach.h>
36#include <pthread.h> /* for spin locks */
37#include <malloc.h> /* for malloc_hook/free_hook */
38
39#include "wiring.h"
40
41static void init_hook (void);
42static void *malloc_hook (size_t size, const void *caller);
43static void free_hook (void *ptr, const void *caller);
44
45/* GNU libc 2.14 defines this macro to declare hook variables as volatile.
46 Define it as empty for older libc versions. */
47#ifndef __MALLOC_HOOK_VOLATILE
48# define __MALLOC_HOOK_VOLATILE
49#endif
50
51void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void) = init_hook;
52
53
54/* #define DEBUG */
55
56/*
57 * All allocations of size less than kalloc_max are rounded to the
58 * next highest power of 2.
59 */
60vm_size_t kalloc_max; /* max before we use vm_allocate */
61#define MINSIZE sizeof(vm_offset_t) /* minimum allocation size */
62
63struct free_list {
64 pthread_spinlock_t lock;
65 vm_offset_t head; /* head of free list */
66#ifdef DEBUG
67 int count;
68#endif /*DEBUG*/
69};
70
71#define KLIST_MAX 13
72 /* sizes: 4, 8, 16, 32, 64,
73 128, 256, 512, 1024,
74 2048, 4096, 8192, 16384 */
75struct free_list kfree_list[KLIST_MAX];
76
77pthread_spinlock_t kget_space_lock;
78vm_offset_t kalloc_next_space = 0;
79vm_offset_t kalloc_end_of_space = 0;
80
81vm_size_t kalloc_wasted_space = 0;
82
83boolean_t kalloc_initialized = FALSE;
84
85/*
86 * Initialize the memory allocator. This should be called only
87 * once on a system wide basis (i.e. first processor to get here
88 * does the initialization).
89 *
90 * This initializes all of the zones.
91 */
92
93void kalloc_init(void)
94{
95 int i;
96
97 /*
98 * Support free lists for items up to vm_page_size or
99 * 16Kbytes, whichever is less.
100 */
101
102 if (vm_page_size > (MINSIZE << (KLIST_MAX-1)))
103 kalloc_max = (MINSIZE << (KLIST_MAX-1));
104 else
105 kalloc_max = vm_page_size;
106
107 for (i = 0; i < KLIST_MAX; i++) {
108 pthread_spin_init(&kfree_list[i].lock, PTHREAD_PROCESS_PRIVATE);
109 kfree_list[i].head = 0;
110 }
111 pthread_spin_init(&kget_space_lock, PTHREAD_PROCESS_PRIVATE);
112
113 /*
114 * Do not allocate memory at address 0.
115 */
116 kalloc_next_space = vm_page_size;
117 kalloc_end_of_space = vm_page_size;
118}
119
120/*
121 * Contiguous space allocator for items of less than a page size.
122 */
123vm_offset_t kget_space(vm_offset_t size)
124{
125 vm_size_t space_to_add = 0;
126 vm_offset_t new_space = 0;
127 vm_offset_t addr;
128
129 pthread_spin_lock(&kget_space_lock);
130 while (kalloc_next_space + size > kalloc_end_of_space) {
131 /*
132 * Add at least one page to allocation area.
133 */
134 space_to_add = round_page(size);
135
136 if (new_space == 0) {
137 /*
138 * Unlock and allocate memory.
139 * Try to make it contiguous with the last
140 * allocation area.
141 */
142 pthread_spin_unlock(&kget_space_lock);
143
144 new_space = kalloc_end_of_space;
145 if (vm_map(mach_task_self(),
146 &new_space, space_to_add, (vm_offset_t) 0, TRUE,
147 MEMORY_OBJECT_NULL, (vm_offset_t) 0, FALSE,
148 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT)
149 != KERN_SUCCESS)
150 return 0;
151 wire_memory(new_space, space_to_add,
152 VM_PROT_READ|VM_PROT_WRITE);
153 pthread_spin_lock(&kget_space_lock);
154 continue;
155 }
156
157 /*
158 * Memory was allocated in a previous iteration.
159 * Check whether the new region is contiguous with the
160 * old one.
161 */
162 if (new_space != kalloc_end_of_space) {
163 /*
164 * Throw away the remainder of the old space,
165 * and start a new one.
166 */
167 kalloc_wasted_space +=
168 kalloc_end_of_space - kalloc_next_space;
169 kalloc_next_space = new_space;
170 }
171 kalloc_end_of_space = new_space + space_to_add;
172
173 new_space = 0;
174 }
175
176 addr = kalloc_next_space;
177 kalloc_next_space += size;
178 pthread_spin_unlock(&kget_space_lock);
179
180 if (new_space != 0)
181 (void) vm_deallocate(mach_task_self(), new_space, space_to_add);
182
183 return addr;
184}
185
186void *kalloc(vm_size_t size)
187{
188 vm_size_t allocsize;
189 vm_offset_t addr;
190 struct free_list *fl;
191
192 if (!kalloc_initialized) {
193 kalloc_init();
194 kalloc_initialized = TRUE;
195 }
196
197 /* compute the size of the block that we will actually allocate */
198
199 allocsize = size;
200 if (size <= kalloc_max) {
201 allocsize = MINSIZE;
202 fl = kfree_list;
203 while (allocsize < size) {
204 allocsize <<= 1;
205 fl++;
206 }
207 }
208
209 /*
210 * If our size is still small enough, check the queue for that size
211 * and allocate.
212 */
213
214 if (allocsize <= kalloc_max) {
215 pthread_spin_lock(&fl->lock);
216 if ((addr = fl->head) != 0) {
217 fl->head = *(vm_offset_t *)addr;
218#ifdef DEBUG
219 fl->count--;
220#endif
221 pthread_spin_unlock(&fl->lock);
222 }
223 else {
224 pthread_spin_unlock(&fl->lock);
225 addr = kget_space(allocsize);
226 }
227 }
228 else {
229 if (vm_allocate(mach_task_self(), &addr, allocsize, TRUE)
230 != KERN_SUCCESS)
231 addr = 0;
232 }
233 return (void *) addr;
234}
235
236void
237kfree( void *data,
238 vm_size_t size)
239{
240 vm_size_t freesize;
241 struct free_list *fl;
242
243 freesize = size;
244 if (size <= kalloc_max) {
    [1] Taking false branch
245 freesize = MINSIZE;
246 fl = kfree_list;
247 while (freesize < size) {
248 freesize <<= 1;
249 fl++;
250 }
251 }
252
253 if (freesize <= kalloc_max) {
    [2] Taking true branch
254 pthread_spin_lock(&fl->lock);
    [3] Function call argument is an uninitialized value
255 *(vm_offset_t *)data = fl->head;
256 fl->head = (vm_offset_t) data;
257#ifdef DEBUG
258 fl->count++;
259#endif
260 pthread_spin_unlock(&fl->lock);
261 }
262 else {
263 (void) vm_deallocate(mach_task_self(), (vm_offset_t)data, freesize);
264 }
265}
266
267static void
268init_hook (void)
269{
270 __malloc_hook = malloc_hook;
271 __free_hook = free_hook;
272}
273
274static void *
275malloc_hook (size_t size, const void *caller)
276{
277 return (void *) kalloc ((vm_size_t) size);
278}
279
280static void
281free_hook (void *ptr, const void *caller)
282{
283 /* Just ignore harmless attempts at cleanliness. */
284 /* panic("free not implemented"); */
285}
286
287void malloc_fork_prepare()
288{
289}
290
291void malloc_fork_parent()
292{
293}
294
295void malloc_fork_child()
296{
297}
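
Suggested Fix

The rewrite below is an editorial sketch against the code shown above, not a patch from the report. It initializes fl and keys the freeing branch off fl itself instead of re-testing kalloc_max. Assuming kalloc_max does not change between the two tests, freesize <= kalloc_max holds exactly when the sizing branch ran (rounding a size that is at most kalloc_max up to the next power of two cannot exceed kalloc_max), so the change preserves behavior while making the initialization visible to the analyzer:

    void
    kfree( void *data,
           vm_size_t size)
    {
        vm_size_t freesize;
        struct free_list *fl = NULL;    /* was uninitialized */

        freesize = size;
        if (size <= kalloc_max) {
            freesize = MINSIZE;
            fl = kfree_list;
            while (freesize < size) {
                freesize <<= 1;
                fl++;
            }
        }

        /* fl is non-null exactly when the sizing branch above ran, so
           no path reaches the lock with fl uninitialized. */
        if (fl != NULL) {
            pthread_spin_lock(&fl->lock);
            *(vm_offset_t *)data = fl->head;
            fl->head = (vm_offset_t) data;
    #ifdef DEBUG
            fl->count++;
    #endif
            pthread_spin_unlock(&fl->lock);
        }
        else {
            (void) vm_deallocate(mach_task_self(), (vm_offset_t)data,
                                 freesize);
        }
    }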