/*
 * General-purpose memory allocator, derived from the Mach kernel's
 * kalloc.  Requests smaller than kalloc_max are rounded up to a power
 * of 2 and served from per-size free lists; larger requests go
 * directly to vm_allocate.
 */

#include <mach.h>
#include <pthread.h>		/* for spin locks */
#include <malloc.h>		/* for malloc_hook/free_hook */

#include "wiring.h"

static void init_hook (void);
static void *malloc_hook (size_t size, const void *caller);
static void free_hook (void *ptr, const void *caller);

/* Newer versions of glibc declare the malloc hook variables as
   volatile via this macro; define it as empty when libc does not
   provide it.  */
#ifndef __MALLOC_HOOK_VOLATILE
# define __MALLOC_HOOK_VOLATILE
#endif

void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void) = init_hook;

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2.
 */
vm_size_t kalloc_max;		/* max before we use vm_allocate */
#define MINSIZE	4		/* minimum allocation size */

struct free_list {
	pthread_spinlock_t lock;
	vm_offset_t head;	/* head of free list */
#ifdef DEBUG
	int count;
#endif /* DEBUG */
};

#define KLIST_MAX	13
				/* sizes: 4, 8, 16, 32, 64,
				   128, 256, 512, 1024, 2048,
				   4096, 8192, 16384 */
struct free_list kfree_list[KLIST_MAX];

pthread_spinlock_t kget_space_lock;
vm_offset_t kalloc_next_space = 0;	/* next unused address in the region */
vm_offset_t kalloc_end_of_space = 0;	/* end of the mapped region */

vm_size_t kalloc_wasted_space = 0;	/* bytes skipped when regions are not contiguous */

boolean_t kalloc_initialized = FALSE;

/*
 * Initialize the memory allocator.
 */
void kalloc_init(void)
{
	int i;

	/*
	 * Determine the size above which we fall back on vm_allocate:
	 * one page, but never more than 16k.
	 */
	if (vm_page_size > 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = vm_page_size;

	for (i = 0; i < KLIST_MAX; i++) {
		pthread_spin_init(&kfree_list[i].lock, PTHREAD_PROCESS_PRIVATE);
		kfree_list[i].head = 0;
	}
	pthread_spin_init(&kget_space_lock, PTHREAD_PROCESS_PRIVATE);

	/*
	 * Do not hand out memory at address 0.
	 */
	kalloc_next_space = vm_page_size;
	kalloc_end_of_space = vm_page_size;
}
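
/*
 * Note: nothing needs to call kalloc_init() directly; kalloc() runs it
 * on first use, and init_hook() below routes glibc's malloc and free
 * through this allocator via __malloc_initialize_hook.
 */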

/*
 * Contiguous space allocator for items smaller than a page.  Carves
 * blocks off a growing region, mapping more memory as needed.
 */
vm_offset_t kget_space(vm_offset_t size)
{
	vm_size_t space_to_add = 0;
	vm_offset_t new_space = 0;
	vm_offset_t addr;

	pthread_spin_lock(&kget_space_lock);
	while (kalloc_next_space + size > kalloc_end_of_space) {
		/*
		 * Add at least one page to the allocation area.
		 */
		space_to_add = round_page(size);

		if (new_space == 0) {
			/*
			 * Unlock and allocate memory.  Try to make it
			 * contiguous with the last allocation area.
			 */
			pthread_spin_unlock(&kget_space_lock);

			new_space = kalloc_end_of_space;
			if (vm_map(mach_task_self(),
				   &new_space, space_to_add, (vm_offset_t) 0, TRUE,
				   MEMORY_OBJECT_NULL, (vm_offset_t) 0, FALSE,
				   VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT)
				!= KERN_SUCCESS)
				return 0;
			wire_memory(new_space, space_to_add,
				    VM_PROT_READ|VM_PROT_WRITE);
			pthread_spin_lock(&kget_space_lock);
			continue;
		}

		/*
		 * Memory was allocated in a previous iteration.  Check
		 * whether the new region is contiguous with the old one.
		 */
		if (new_space != kalloc_end_of_space) {
			/*
			 * It is not; throw away the remainder of the
			 * old space and start over at the new region.
			 */
			kalloc_wasted_space +=
				kalloc_end_of_space - kalloc_next_space;
			kalloc_next_space = new_space;
		}
		kalloc_end_of_space = new_space + space_to_add;

		new_space = 0;
	}

	addr = kalloc_next_space;
	kalloc_next_space += size;
	pthread_spin_unlock(&kget_space_lock);

	/*
	 * Another thread extended the region while we were unlocked;
	 * release the mapping we no longer need.
	 */
	if (new_space != 0)
		(void) vm_deallocate(mach_task_self(), new_space, space_to_add);

	return addr;
}

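/*
 * Allocate at least SIZE bytes.  Small requests are rounded up to a
 * power of 2 and served from the matching free list, refilled from
 * kget_space() when empty; larger requests go straight to
 * vm_allocate.  Returns 0 on failure.
 */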
void *kalloc(vm_size_t size)
{
	vm_size_t allocsize;
	vm_offset_t addr;
	struct free_list *fl;

	if (!kalloc_initialized) {
		kalloc_init();
		kalloc_initialized = TRUE;
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size < kalloc_max) {
		allocsize = MINSIZE;
		fl = kfree_list;
		while (allocsize < size) {
			allocsize <<= 1;
			fl++;
		}
	}

	/*
	 * If our size is still small enough, check the queue for that
	 * size and allocate.
	 */
	if (allocsize < kalloc_max) {
		pthread_spin_lock(&fl->lock);
		if ((addr = fl->head) != 0) {
			/* pop the first free block; its first word
			   links to the next one */
			fl->head = *(vm_offset_t *)addr;
#ifdef DEBUG
			fl->count--;
#endif
			pthread_spin_unlock(&fl->lock);
		}
		else {
			pthread_spin_unlock(&fl->lock);
			addr = kget_space(allocsize);
		}
	}
	else {
		if (vm_allocate(mach_task_self(), &addr, allocsize, TRUE)
			!= KERN_SUCCESS)
			addr = 0;
	}
	return (void *) addr;
}

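/*
 * Return a block to the allocator.  SIZE must be the size originally
 * passed to kalloc(): no per-block header is stored, so the size is
 * the only way to find the right free list.
 */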
void
kfree(	void *data,
	vm_size_t size)
{
	vm_size_t freesize;
	struct free_list *fl;

	/* replay kalloc's rounding to find the matching free list */
	freesize = size;
	if (size < kalloc_max) {
		freesize = MINSIZE;
		fl = kfree_list;
		while (freesize < size) {
			freesize <<= 1;
			fl++;
		}
	}

	if (freesize < kalloc_max) {
		/* push the block on its free list, linking through
		   the first word */
		pthread_spin_lock(&fl->lock);
		*(vm_offset_t *)data = fl->head;
		fl->head = (vm_offset_t) data;
#ifdef DEBUG
		fl->count++;
#endif
		pthread_spin_unlock(&fl->lock);
	}
	else {
		(void) vm_deallocate(mach_task_self(), (vm_offset_t)data, freesize);
	}
}
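
/*
 * Worked example: kalloc(100) doubles allocsize 4 -> 8 -> 16 -> 32 ->
 * 64 -> 128 and stops at 128 with fl pointing at kfree_list[5], so a
 * 100-byte request consumes a 128-byte block.  kfree(ptr, 100) replays
 * the same walk and pushes the block back on that same list.
 */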

static void
init_hook (void)
{
  __malloc_hook = malloc_hook;
  __free_hook = free_hook;
}

static void *
malloc_hook (size_t size, const void *caller)
{
  return (void *) kalloc ((vm_size_t) size);
}

static void
free_hook (void *ptr, const void *caller)
{
  /* Ignore the free: the hooked interface does not pass the block
     size, and kfree() cannot find the right list without it.  */
}

/*
 * Fork-time hooks.  This allocator needs no preparation around
 * fork(), so these are empty stubs.
 */
void malloc_fork_prepare()
{
}

void malloc_fork_parent()
{
}

void malloc_fork_child()
{
}
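
#ifdef KALLOC_EXAMPLE
/*
 * Minimal usage sketch, not part of the original file.  The
 * KALLOC_EXAMPLE guard and this main() are hypothetical additions for
 * illustration: they exercise the small-block path (power-of-2 free
 * lists) and the large-block path (vm_allocate).
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char *small = kalloc(64);		/* served from a free list */
	void *large = kalloc(vm_page_size);	/* falls through to vm_allocate */

	if (small == NULL || large == NULL)
		return 1;

	strcpy(small, "hello");
	printf("%s from kalloc\n", small);

	kfree(small, 64);		/* caller must remember the size */
	kfree(large, vm_page_size);
	return 0;
}
#endif /* KALLOC_EXAMPLE */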