File: obj-scan-build/../kern/slab.c
Location: line 1146, column 15
Description: Access to field 'addr' results in a dereference of a null pointer (loaded from variable 'slab')
   1 | /*
   2 |  * Copyright (c) 2011 Free Software Foundation.
   3 |  *
   4 |  * This program is free software; you can redistribute it and/or modify
   5 |  * it under the terms of the GNU General Public License as published by
   6 |  * the Free Software Foundation; either version 2 of the License, or
   7 |  * (at your option) any later version.
   8 |  *
   9 |  * This program is distributed in the hope that it will be useful,
  10 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12 |  * GNU General Public License for more details.
  13 |  *
  14 |  * You should have received a copy of the GNU General Public License along
  15 |  * with this program; if not, write to the Free Software Foundation, Inc.,
  16 |  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  17 |  */
  18 |
  19 | /*
  20 |  * Copyright (c) 2010, 2011 Richard Braun.
  21 |  * All rights reserved.
  22 |  *
  23 |  * Redistribution and use in source and binary forms, with or without
  24 |  * modification, are permitted provided that the following conditions
  25 |  * are met:
  26 |  * 1. Redistributions of source code must retain the above copyright
  27 |  *    notice, this list of conditions and the following disclaimer.
  28 |  * 2. Redistributions in binary form must reproduce the above copyright
  29 |  *    notice, this list of conditions and the following disclaimer in the
  30 |  *    documentation and/or other materials provided with the distribution.
  31 |  *
  32 |  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  33 |  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  34 |  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  35 |  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  36 |  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  37 |  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  38 |  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  39 |  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  40 |  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  41 |  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  42 |  *
  43 |  *
  44 |  * Object caching and general purpose memory allocator.
  45 |  *
  46 |  * This allocator is based on the paper "The Slab Allocator: An Object-Caching
  47 |  * Kernel Memory Allocator" by Jeff Bonwick.
  48 |  *
  49 |  * It allows the allocation of objects (i.e. fixed-size typed buffers) from
  50 |  * caches and is efficient in both space and time. This implementation follows
  51 |  * many of the indications from the paper mentioned. The most notable
  52 |  * differences are outlined below.
  53 |  *
  54 |  * The per-cache self-scaling hash table for buffer-to-bufctl conversion,
  55 |  * described in 3.2.3 "Slab Layout for Large Objects", has been replaced by
  56 |  * a red-black tree storing slabs, sorted by address. The use of a
  57 |  * self-balancing tree for buffer-to-slab conversions provides a few advantages
  58 |  * over a hash table. Unlike a hash table, a BST provides a "lookup nearest"
  59 |  * operation, so obtaining the slab data (whether it is embedded in the slab or
  60 |  * off slab) from a buffer address simply consists of a "lookup nearest towards
  61 |  * 0" tree search. Storing slabs instead of buffers also considerably reduces
  62 |  * the number of elements to retain. Finally, a self-balancing tree is a true
  63 |  * self-scaling data structure, whereas a hash table requires periodic
  64 |  * maintenance and complete resizing, which is expensive. The only drawback is
  65 |  * that releasing a buffer to the slab layer takes logarithmic time instead of
  66 |  * constant time. But as the data set size is kept reasonable (because slabs
  67 |  * are stored instead of buffers) and because the CPU pool layer services most
  68 |  * requests, avoiding many accesses to the slab layer, it is considered an
  69 |  * acceptable tradeoff.
  70 |  *
  71 |  * This implementation uses per-cpu pools of objects, which service most
  72 |  * allocation requests. These pools act as caches (but are named differently
  73 |  * to avoid confusion with CPU caches) that reduce contention on multiprocessor
  74 |  * systems. When a pool is empty and cannot provide an object, it is filled by
  75 |  * transferring multiple objects from the slab layer. The symmetric case is
  76 |  * handled likewise.
  77 |  */
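     | /*
     |  * For instance, with two active slabs whose buffers start at addresses
     |  * 0x10000 and 0x20000, a "lookup nearest towards 0" for the buffer
     |  * address 0x10040 returns the slab at 0x10000, i.e. the slab with the
     |  * greatest address lower than or equal to the buffer address.
     |  */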
  78 |
  79 | #include <string.h>
  80 | #include <kern/assert.h>
  81 | #include <kern/mach_clock.h>
  82 | #include <kern/printf.h>
  83 | #include <kern/slab.h>
  84 | #include <kern/kalloc.h>
  85 | #include <kern/cpu_number.h>
  86 | #include <mach/vm_param.h>
  87 | #include <mach/machine/vm_types.h>
  88 | #include <vm/vm_kern.h>
  89 | #include <vm/vm_types.h>
  90 | #include <sys/types.h>
  91 |
  92 | #ifdef MACH_DEBUG
  93 | #include <mach_debug/slab_info.h>
  94 | #endif
  95 |
  96 | /*
  97 |  * Utility macros.
  98 |  */
  99 | #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 100 | #define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
 101 | #define ISP2(x) P2ALIGNED(x, x)
 102 | #define P2ALIGN(x, a) ((x) & -(a))
 103 | #define P2ROUND(x, a) (-(-(x) & -(a)))
 104 | #define P2END(x, a) (-(~(x) & -(a)))
 105 | #define likely(expr) __builtin_expect(!!(expr), 1)
 106 | #define unlikely(expr) __builtin_expect(!!(expr), 0)
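     | /*
     |  * Worked examples of the power-of-2 macros, for a = 0x1000 (4 KiB):
     |  *   P2ALIGN(0x1234, 0x1000) == 0x1000   (round down)
     |  *   P2ROUND(0x1234, 0x1000) == 0x2000   (round up)
     |  *   P2END(0x1000, 0x1000)   == 0x2000   (end of the aligned block
     |  *                                        containing x, identical to
     |  *                                        P2ROUND(x + 1, a))
     |  */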
 107 |
 108 | /*
 109 |  * Minimum required alignment.
 110 |  */
 111 | #define KMEM_ALIGN_MIN 8
 112 |
 113 | /*
 114 |  * Minimum number of buffers per slab.
 115 |  *
 116 |  * This value is ignored when the slab size exceeds a threshold.
 117 |  */
 118 | #define KMEM_MIN_BUFS_PER_SLAB 8
 119 |
 120 | /*
 121 |  * Special slab size beyond which the minimum number of buffers per slab is
 122 |  * ignored when computing the slab size of a cache.
 123 |  */
 124 | #define KMEM_SLAB_SIZE_THRESHOLD (8 * PAGE_SIZE)
 125 |
 126 | /*
 127 |  * Special buffer size under which slab data is unconditionally allocated
 128 |  * from its associated slab.
 129 |  */
 130 | #define KMEM_BUF_SIZE_THRESHOLD (PAGE_SIZE / 8)
 131 |
 132 | /*
 133 |  * Time (in ticks) between two garbage collection operations.
 134 |  */
 135 | #define KMEM_GC_INTERVAL (5 * hz)
 136 |
 137 | /*
 138 |  * The transfer size of a CPU pool is computed by dividing the pool size by
 139 |  * this value.
 140 |  */
 141 | #define KMEM_CPU_POOL_TRANSFER_RATIO 2
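     | /*
     |  * E.g. a 128-object pool moves ceil(128 / 2) = 64 objects to or from
     |  * the slab layer per refill or drain (see kmem_cpu_pool_build()).
     |  */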
 142 |
 143 | /*
 144 |  * Redzone guard word.
 145 |  */
 146 | #ifdef __LP64__
 147 | #if _HOST_BIG_ENDIAN
 148 | #define KMEM_REDZONE_WORD 0xfeedfacefeedfaceUL
 149 | #else /* _HOST_BIG_ENDIAN */
 150 | #define KMEM_REDZONE_WORD 0xcefaedfecefaedfeUL
 151 | #endif /* _HOST_BIG_ENDIAN */
 152 | #else /* __LP64__ */
 153 | #if _HOST_BIG_ENDIAN
 154 | #define KMEM_REDZONE_WORD 0xfeedfaceUL
 155 | #else /* _HOST_BIG_ENDIAN */
 156 | #define KMEM_REDZONE_WORD 0xcefaedfeUL
 157 | #endif /* _HOST_BIG_ENDIAN */
 158 | #endif /* __LP64__ */
 159 |
 160 | /*
 161 |  * Redzone byte for padding.
 162 |  */
 163 | #define KMEM_REDZONE_BYTE 0xbb
 164 |
 165 | /*
 166 |  * Size of the VM submap from which default backend functions allocate.
 167 |  */
 168 | #define KMEM_MAP_SIZE (128 * 1024 * 1024)
 169 |
 170 | /*
 171 |  * Shift for the first kalloc cache size.
 172 |  */
 173 | #define KALLOC_FIRST_SHIFT 5
 174 |
 175 | /*
 176 |  * Number of caches backing general purpose allocations.
 177 |  */
 178 | #define KALLOC_NR_CACHES 13
 179 |
 180 | /*
 181 |  * Values the buftag state member can take.
 182 |  */
 183 | #ifdef __LP64__
 184 | #if _HOST_BIG_ENDIAN
 185 | #define KMEM_BUFTAG_ALLOC 0xa110c8eda110c8edUL
 186 | #define KMEM_BUFTAG_FREE 0xf4eeb10cf4eeb10cUL
 187 | #else /* _HOST_BIG_ENDIAN */
 188 | #define KMEM_BUFTAG_ALLOC 0xedc810a1edc810a1UL
 189 | #define KMEM_BUFTAG_FREE 0x0cb1eef40cb1eef4UL
 190 | #endif /* _HOST_BIG_ENDIAN */
 191 | #else /* __LP64__ */
 192 | #if _HOST_BIG_ENDIAN
 193 | #define KMEM_BUFTAG_ALLOC 0xa110c8edUL
 194 | #define KMEM_BUFTAG_FREE 0xf4eeb10cUL
 195 | #else /* _HOST_BIG_ENDIAN */
 196 | #define KMEM_BUFTAG_ALLOC 0xedc810a1UL
 197 | #define KMEM_BUFTAG_FREE 0x0cb1eef4UL
 198 | #endif /* _HOST_BIG_ENDIAN */
 199 | #endif /* __LP64__ */
 200 |
 201 | /*
 202 |  * Free and uninitialized patterns.
 203 |  *
 204 |  * These values are unconditionally 64-bit wide since buffers are at least
 205 |  * 8-byte aligned.
 206 |  */
 207 | #if _HOST_BIG_ENDIAN
 208 | #define KMEM_FREE_PATTERN 0xdeadbeefdeadbeefULL
 209 | #define KMEM_UNINIT_PATTERN 0xbaddcafebaddcafeULL
 210 | #else /* _HOST_BIG_ENDIAN */
 211 | #define KMEM_FREE_PATTERN 0xefbeaddeefbeaddeULL
 212 | #define KMEM_UNINIT_PATTERN 0xfecaddbafecaddbaULL
 213 | #endif /* _HOST_BIG_ENDIAN */
 214 |
 215 | /*
 216 |  * Cache flags.
 217 |  *
 218 |  * The flags don't change once set and can be tested without locking.
 219 |  */
 220 | #define KMEM_CF_NO_CPU_POOL 0x01    /* CPU pool layer disabled */
 221 | #define KMEM_CF_SLAB_EXTERNAL 0x02  /* Slab data is off slab */
 222 | #define KMEM_CF_NO_RECLAIM 0x04     /* Slabs are not reclaimable */
 223 | #define KMEM_CF_VERIFY 0x08         /* Debugging facilities enabled */
 224 | #define KMEM_CF_DIRECT 0x10         /* No buf-to-slab tree lookup */
 225 |
 226 | /*
 227 |  * Options for kmem_cache_alloc_verify().
 228 |  */
 229 | #define KMEM_AV_NOCONSTRUCT 0
 230 | #define KMEM_AV_CONSTRUCT 1
 231 |
 232 | /*
 233 |  * Error codes for kmem_cache_error().
 234 |  */
 235 | #define KMEM_ERR_INVALID 0      /* Invalid address being freed */
 236 | #define KMEM_ERR_DOUBLEFREE 1   /* Freeing already free address */
 237 | #define KMEM_ERR_BUFTAG 2       /* Invalid buftag content */
 238 | #define KMEM_ERR_MODIFIED 3     /* Buffer modified while free */
 239 | #define KMEM_ERR_REDZONE 4      /* Redzone violation */
 240 |
 241 | #if SLAB_USE_CPU_POOLS
 242 | /*
 243 |  * Available CPU pool types.
 244 |  *
 245 |  * For each entry, the CPU pool size applies from the entry buf_size
 246 |  * (excluded) up to (and including) the buf_size of the preceding entry.
 247 |  *
 248 |  * See struct kmem_cpu_pool_type for a description of the values.
 249 |  */
 250 | static struct kmem_cpu_pool_type kmem_cpu_pool_types[] = {
 251 |     { 32768, 1, 0, NULL },
 252 |     { 4096, 8, CPU_L1_SIZE, NULL },
 253 |     { 256, 64, CPU_L1_SIZE, NULL },
 254 |     { 0, 128, CPU_L1_SIZE, NULL }
 255 | };
 256 |
 257 | /*
 258 |  * Caches where CPU pool arrays are allocated from.
 259 |  */
 260 | static struct kmem_cache kmem_cpu_array_caches[ARRAY_SIZE(kmem_cpu_pool_types)];
 261 | #endif /* SLAB_USE_CPU_POOLS */
 262 |
 263 | /*
 264 |  * Cache for off slab data.
 265 |  */
 266 | static struct kmem_cache kmem_slab_cache;
 267 |
 268 | /*
 269 |  * General purpose caches array.
 270 |  */
 271 | static struct kmem_cache kalloc_caches[KALLOC_NR_CACHES];
 272 |
 273 | /*
 274 |  * List of all caches managed by the allocator.
 275 |  */
 276 | static struct list kmem_cache_list;
 277 | static unsigned int kmem_nr_caches;
 278 | static simple_lock_data_t __attribute__((used)) kmem_cache_list_lock;
 279 |
 280 | /*
 281 |  * VM submap for slab caches.
 282 |  */
 283 | static struct vm_map kmem_map_store;
 284 | vm_map_t kmem_map = &kmem_map_store;
 285 |
 286 | /*
 287 |  * Time of the last memory reclaim, in clock ticks.
 288 |  */
 289 | static unsigned long kmem_gc_last_tick;
 290 |
 291 | #define kmem_error(format, ...)                                 \
 292 |     printf("mem: error: %s(): " format "\n", __func__,         \
 293 |            ## __VA_ARGS__)
 294 |
 295 | #define kmem_warn(format, ...)                                  \
 296 |     printf("mem: warning: %s(): " format "\n", __func__,       \
 297 |            ## __VA_ARGS__)
 298 |
 299 | #define kmem_print(format, ...)                                 \
 300 |     printf(format "\n", ## __VA_ARGS__)
 301 |
 302 | static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
 303 |                              void *arg);
 304 | static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache);
 305 | static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf);
 306 |
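     | /*
     |  * Pattern helpers backing KMEM_CF_VERIFY: fill free buffers with a
     |  * known 64-bit pattern and later check that it is intact, reporting
     |  * the first mismatching byte when it is not.
     |  */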
 307 | static void * kmem_buf_verify_bytes(void *buf, void *pattern, size_t size)
 308 | {
 309 |     char *ptr, *pattern_ptr, *end;
 310 |
 311 |     end = buf + size;
 312 |
 313 |     for (ptr = buf, pattern_ptr = pattern; ptr < end; ptr++, pattern_ptr++)
 314 |         if (*ptr != *pattern_ptr)
 315 |             return ptr;
 316 |
 317 |     return NULL;
 318 | }
 319 |
 320 | static void * kmem_buf_verify(void *buf, uint64_t pattern, vm_size_t size)
 321 | {
 322 |     uint64_t *ptr, *end;
 323 |
 324 |     assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
 325 |     assert(P2ALIGNED(size, sizeof(uint64_t)));
 326 |
 327 |     end = buf + size;
 328 |
 329 |     for (ptr = buf; ptr < end; ptr++)
 330 |         if (*ptr != pattern)
 331 |             return kmem_buf_verify_bytes(ptr, &pattern, sizeof(pattern));
 332 |
 333 |     return NULL;
 334 | }
 335 |
 336 | static void kmem_buf_fill(void *buf, uint64_t pattern, size_t size)
 337 | {
 338 |     uint64_t *ptr, *end;
 339 |
 340 |     assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
 341 |     assert(P2ALIGNED(size, sizeof(uint64_t)));
 342 |
 343 |     end = buf + size;
 344 |
 345 |     for (ptr = buf; ptr < end; ptr++)
 346 |         *ptr = pattern;
 347 | }
 348 |
 349 | static void * kmem_buf_verify_fill(void *buf, uint64_t old, uint64_t new,
 350 |                                    size_t size)
 351 | {
 352 |     uint64_t *ptr, *end;
 353 |
 354 |     assert(P2ALIGNED((unsigned long)buf, sizeof(uint64_t)));
 355 |     assert(P2ALIGNED(size, sizeof(uint64_t)));
 356 |
 357 |     end = buf + size;
 358 |
 359 |     for (ptr = buf; ptr < end; ptr++) {
 360 |         if (*ptr != old)
 361 |             return kmem_buf_verify_bytes(ptr, &old, sizeof(old));
 362 |
 363 |         *ptr = new;
 364 |     }
 365 |
 366 |     return NULL;
 367 | }
 368 |
 369 | static inline union kmem_bufctl *
 370 | kmem_buf_to_bufctl(void *buf, struct kmem_cache *cache)
 371 | {
 372 |     return (union kmem_bufctl *)(buf + cache->bufctl_dist);
 373 | }
 374 |
 375 | static inline struct kmem_buftag *
 376 | kmem_buf_to_buftag(void *buf, struct kmem_cache *cache)
 377 | {
 378 |     return (struct kmem_buftag *)(buf + cache->buftag_dist);
 379 | }
 380 |
 381 | static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
 382 |                                         struct kmem_cache *cache)
 383 | {
 384 |     return (void *)bufctl - cache->bufctl_dist;
 385 | }
 386 |
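     | /*
     |  * Default backend: wired kernel memory from the kmem_map submap.
     |  * Caches may override it through slab_alloc_fn/slab_free_fn.
     |  */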
 387 | static vm_offset_t kmem_pagealloc(vm_size_t size)
 388 | {
 389 |     vm_offset_t addr;
 390 |     kern_return_t kr;
 391 |
 392 |     kr = kmem_alloc_wired(kmem_map, &addr, size);
 393 |
 394 |     if (kr != KERN_SUCCESS)
 395 |         return 0;
 396 |
 397 |     return addr;
 398 | }
 399 |
 400 | static void kmem_pagefree(vm_offset_t ptr, vm_size_t size)
 401 | {
 402 |     kmem_free(kmem_map, ptr, size);
 403 | }
 404 |
 405 | static void kmem_slab_create_verify(struct kmem_slab *slab,
 406 |                                     struct kmem_cache *cache)
 407 | {
 408 |     struct kmem_buftag *buftag;
 409 |     size_t buf_size;
 410 |     unsigned long buffers;
 411 |     void *buf;
 412 |
 413 |     buf_size = cache->buf_size;
 414 |     buf = slab->addr;
 415 |     buftag = kmem_buf_to_buftag(buf, cache);
 416 |
 417 |     for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
 418 |         kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
 419 |         buftag->state = KMEM_BUFTAG_FREE;
 420 |         buf += buf_size;
 421 |         buftag = kmem_buf_to_buftag(buf, cache);
 422 |     }
 423 | }
 424 |
 425 | /*
 426 |  * Create an empty slab for a cache.
 427 |  *
 428 |  * The caller must drop all locks before calling this function.
 429 |  */
 430 | static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
 431 |                                            size_t color)
 432 | {
 433 |     struct kmem_slab *slab;
 434 |     union kmem_bufctl *bufctl;
 435 |     size_t buf_size;
 436 |     unsigned long buffers;
 437 |     void *slab_buf;
 438 |
 439 |     if (cache->slab_alloc_fn == NULL)
 440 |         slab_buf = (void *)kmem_pagealloc(cache->slab_size);
 441 |     else
 442 |         slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size);
 443 |
 444 |     if (slab_buf == NULL)
 445 |         return NULL;
 446 |
 447 |     if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
 448 |         assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
 449 |         slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
 450 |
 451 |         if (slab == NULL) {
 452 |             if (cache->slab_free_fn == NULL)
 453 |                 kmem_pagefree((vm_offset_t)slab_buf, cache->slab_size);
 454 |             else
 455 |                 cache->slab_free_fn((vm_offset_t)slab_buf, cache->slab_size);
 456 |
 457 |             return NULL;
 458 |         }
 459 |     } else {
 460 |         slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1;
 461 |     }
 462 |
 463 |     list_node_init(&slab->list_node);
 464 |     rbtree_node_init(&slab->tree_node);
 465 |     slab->nr_refs = 0;
 466 |     slab->first_free = NULL;
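     |     /*
     |      * The first buffer is offset by the slab color so that buffers of
     |      * successive slabs start on different cache lines (the slab
     |      * coloring scheme from Bonwick's paper).
     |      */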
 467 |     slab->addr = slab_buf + color;
 468 |
 469 |     buf_size = cache->buf_size;
 470 |     bufctl = kmem_buf_to_bufctl(slab->addr, cache);
 471 |
 472 |     for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
 473 |         bufctl->next = slab->first_free;
 474 |         slab->first_free = bufctl;
 475 |         bufctl = (union kmem_bufctl *)((void *)bufctl + buf_size);
 476 |     }
 477 |
 478 |     if (cache->flags & KMEM_CF_VERIFY)
 479 |         kmem_slab_create_verify(slab, cache);
 480 |
 481 |     return slab;
 482 | }
 483 |
 484 | static void kmem_slab_destroy_verify(struct kmem_slab *slab,
 485 |                                      struct kmem_cache *cache)
 486 | {
 487 |     struct kmem_buftag *buftag;
 488 |     size_t buf_size;
 489 |     unsigned long buffers;
 490 |     void *buf, *addr;
 491 |
 492 |     buf_size = cache->buf_size;
 493 |     buf = slab->addr;
 494 |     buftag = kmem_buf_to_buftag(buf, cache);
 495 |
 496 |     for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
 497 |         if (buftag->state != KMEM_BUFTAG_FREE)
 498 |             kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
 499 |
 500 |         addr = kmem_buf_verify(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
 501 |
 502 |         if (addr != NULL)
 503 |             kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr);
 504 |
 505 |         buf += buf_size;
 506 |         buftag = kmem_buf_to_buftag(buf, cache);
 507 |     }
 508 | }
 509 |
 510 | /*
 511 |  * Destroy a slab.
 512 |  *
 513 |  * The caller must drop all locks before calling this function.
 514 |  */
 515 | static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
 516 | {
 517 |     vm_offset_t slab_buf;
 518 |
 519 |     assert(slab->nr_refs == 0);
 520 |     assert(slab->first_free != NULL);
 521 |     assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
 522 |
 523 |     if (cache->flags & KMEM_CF_VERIFY)
 524 |         kmem_slab_destroy_verify(slab, cache);
 525 |
 526 |     slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
 527 |
 528 |     if (cache->slab_free_fn == NULL)
 529 |         kmem_pagefree(slab_buf, cache->slab_size);
 530 |     else
 531 |         cache->slab_free_fn(slab_buf, cache->slab_size);
 532 |
 533 |     if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
 534 |         kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
 535 | }
 536 |
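     | /*
     |  * The buf-to-slab tree is needed whenever page arithmetic alone cannot
     |  * recover the slab data: either the slab data may be off slab
     |  * (!KMEM_CF_DIRECT), or verification must locate slabs from arbitrary,
     |  * possibly invalid, addresses (KMEM_CF_VERIFY).
     |  */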
 537 | static inline int kmem_slab_use_tree(int flags)
 538 | {
 539 |     return !(flags & KMEM_CF_DIRECT) || (flags & KMEM_CF_VERIFY);
 540 | }
 541 |
 542 | static inline int kmem_slab_cmp_lookup(const void *addr,
 543 |                                        const struct rbtree_node *node)
 544 | {
 545 |     struct kmem_slab *slab;
 546 |
 547 |     slab = rbtree_entry(node, struct kmem_slab, tree_node);
 548 |
 549 |     if (addr == slab->addr)
 550 |         return 0;
 551 |     else if (addr < slab->addr)
 552 |         return -1;
 553 |     else
 554 |         return 1;
 555 | }
 556 |
 557 | static inline int kmem_slab_cmp_insert(const struct rbtree_node *a,
 558 |                                        const struct rbtree_node *b)
 559 | {
 560 |     struct kmem_slab *slab;
 561 |
 562 |     slab = rbtree_entry(a, struct kmem_slab, tree_node);
 563 |     return kmem_slab_cmp_lookup(slab->addr, b);
 564 | }
 565 |
 566 | #if SLAB_USE_CPU_POOLS
 567 | static void kmem_cpu_pool_init(struct kmem_cpu_pool *cpu_pool,
 568 |                                struct kmem_cache *cache)
 569 | {
 570 |     simple_lock_init(&cpu_pool->lock);
 571 |     cpu_pool->flags = cache->flags;
 572 |     cpu_pool->size = 0;
 573 |     cpu_pool->transfer_size = 0;
 574 |     cpu_pool->nr_objs = 0;
 575 |     cpu_pool->array = NULL;
 576 | }
 577 |
 578 | /*
 579 |  * Return a CPU pool.
 580 |  *
 581 |  * This function will generally return the pool matching the CPU running the
 582 |  * calling thread. Because of context switches and thread migration, the
 583 |  * caller might be running on another processor after this function returns.
 584 |  * Although not optimal, this should rarely happen, and it doesn't affect the
 585 |  * allocator operations in any other way, as CPU pools are always valid, and
 586 |  * their access is serialized by a lock.
 587 |  */
 588 | static inline struct kmem_cpu_pool * kmem_cpu_pool_get(struct kmem_cache *cache)
 589 | {
 590 |     return &cache->cpu_pools[cpu_number()];
 591 | }
 592 |
 593 | static inline void kmem_cpu_pool_build(struct kmem_cpu_pool *cpu_pool,
 594 |                                        struct kmem_cache *cache, void **array)
 595 | {
 596 |     cpu_pool->size = cache->cpu_pool_type->array_size;
 597 |     cpu_pool->transfer_size = (cpu_pool->size
 598 |                                + KMEM_CPU_POOL_TRANSFER_RATIO - 1)
 599 |                               / KMEM_CPU_POOL_TRANSFER_RATIO;
 600 |     cpu_pool->array = array;
 601 | }
 602 |
 603 | static inline void * kmem_cpu_pool_pop(struct kmem_cpu_pool *cpu_pool)
 604 | {
 605 |     cpu_pool->nr_objs--;
 606 |     return cpu_pool->array[cpu_pool->nr_objs];
 607 | }
 608 |
 609 | static inline void kmem_cpu_pool_push(struct kmem_cpu_pool *cpu_pool, void *obj)
 610 | {
 611 |     cpu_pool->array[cpu_pool->nr_objs] = obj;
 612 |     cpu_pool->nr_objs++;
 613 | }
 614 |
 615 | static int kmem_cpu_pool_fill(struct kmem_cpu_pool *cpu_pool,
 616 |                               struct kmem_cache *cache)
 617 | {
 618 |     kmem_cache_ctor_t ctor;
 619 |     void *buf;
 620 |     int i;
 621 |
 622 |     ctor = (cpu_pool->flags & KMEM_CF_VERIFY) ? NULL : cache->ctor;
 623 |
 624 |     simple_lock(&cache->lock);
 625 |
 626 |     for (i = 0; i < cpu_pool->transfer_size; i++) {
 627 |         buf = kmem_cache_alloc_from_slab(cache);
 628 |
 629 |         if (buf == NULL)
 630 |             break;
 631 |
 632 |         if (ctor != NULL)
 633 |             ctor(buf);
 634 |
 635 |         kmem_cpu_pool_push(cpu_pool, buf);
 636 |     }
 637 |
 638 |     simple_unlock(&cache->lock);
 639 |
 640 |     return i;
 641 | }
 642 |
 643 | static void kmem_cpu_pool_drain(struct kmem_cpu_pool *cpu_pool,
 644 |                                 struct kmem_cache *cache)
 645 | {
 646 |     void *obj;
 647 |     int i;
 648 |
 649 |     simple_lock(&cache->lock);
 650 |
 651 |     for (i = cpu_pool->transfer_size; i > 0; i--) {
 652 |         obj = kmem_cpu_pool_pop(cpu_pool);
 653 |         kmem_cache_free_to_slab(cache, obj);
 654 |     }
 655 |
 656 |     simple_unlock(&cache->lock);
 657 | }
 658 | #endif /* SLAB_USE_CPU_POOLS */
 659 |
 660 | static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
 661 |                              void *arg)
 662 | {
 663 |     struct kmem_buftag *buftag;
 664 |
 665 |     kmem_error("cache: %s, buffer: %p", cache->name, (void *)buf);
 666 |
 667 |     switch(error) {
 668 |     case KMEM_ERR_INVALID:
 669 |         kmem_error("freeing invalid address");
 670 |         break;
 671 |     case KMEM_ERR_DOUBLEFREE:
 672 |         kmem_error("attempting to free the same address twice");
 673 |         break;
 674 |     case KMEM_ERR_BUFTAG:
 675 |         buftag = arg;
 676 |         kmem_error("invalid buftag content, buftag state: %p",
 677 |                    (void *)buftag->state);
 678 |         break;
 679 |     case KMEM_ERR_MODIFIED:
 680 |         kmem_error("free buffer modified, fault address: %p, "
 681 |                    "offset in buffer: %td", arg, arg - buf);
 682 |         break;
 683 |     case KMEM_ERR_REDZONE:
 684 |         kmem_error("write beyond end of buffer, fault address: %p, "
 685 |                    "offset in buffer: %td", arg, arg - buf);
 686 |         break;
 687 |     default:
 688 |         kmem_error("unknown error");
 689 |     }
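     |
     |     /*
     |      * Returning from this function would let callers run on in an
     |      * invalid state: kmem_cache_free_verify() would dereference a slab
     |      * derived from a NULL tree node, the very access reported above for
     |      * line 1146. Minimal fix sketch, assuming Mach's panic() is
     |      * available here:
     |      */
     |     panic("mem: slab allocator error");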
 690 |
 691 |     /*
 692 |      * Never reached.
 693 |      */
 694 | }
 695 |
 696 | /*
 697 |  * Compute an appropriate slab size for the given cache.
 698 |  *
 699 |  * Once the slab size is known, this function sets the related properties
 700 |  * (buffers per slab and maximum color). It can also set the KMEM_CF_DIRECT
 701 |  * and/or KMEM_CF_SLAB_EXTERNAL flags depending on the resulting layout.
 702 |  */
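     | /*
     |  * For instance, with 4 KiB pages and 1 KiB buffers: one page yields
     |  * only 4 buffers, so the loop continues until an 8 KiB slab holds the
     |  * minimum of 8 buffers with no waste; sizeof(struct kmem_slab) does
     |  * not fit in the remaining space, so the slab data goes off slab
     |  * (KMEM_CF_SLAB_EXTERNAL).
     |  */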
 703 | static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 704 | {
 705 |     size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size;
 706 |     size_t waste, waste_min;
 707 |     int embed, optimal_embed = 0;
 708 |
 709 |     buf_size = cache->buf_size;
 710 |
 711 |     if (buf_size < KMEM_BUF_SIZE_THRESHOLD)
 712 |         flags |= KMEM_CACHE_NOOFFSLAB;
 713 |
 714 |     i = 0;
 715 |     waste_min = (size_t)-1;
 716 |
 717 |     do {
 718 |         i++;
 719 |         slab_size = P2ROUND(i * buf_size, PAGE_SIZE);
 720 |         free_slab_size = slab_size;
 721 |
 722 |         if (flags & KMEM_CACHE_NOOFFSLAB)
 723 |             free_slab_size -= sizeof(struct kmem_slab);
 724 |
 725 |         buffers = free_slab_size / buf_size;
 726 |         waste = free_slab_size % buf_size;
 727 |
 728 |         if (buffers > i)
 729 |             i = buffers;
 730 |
 731 |         if (flags & KMEM_CACHE_NOOFFSLAB)
 732 |             embed = 1;
 733 |         else if (sizeof(struct kmem_slab) <= waste) {
 734 |             embed = 1;
 735 |             waste -= sizeof(struct kmem_slab);
 736 |         } else {
 737 |             embed = 0;
 738 |         }
 739 |
 740 |         if (waste <= waste_min) {
 741 |             waste_min = waste;
 742 |             optimal_size = slab_size;
 743 |             optimal_embed = embed;
 744 |         }
 745 |     } while ((buffers < KMEM_MIN_BUFS_PER_SLAB)
 746 |              && (slab_size < KMEM_SLAB_SIZE_THRESHOLD));
 747 |
 748 |     assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
 749 |
 750 |     cache->slab_size = optimal_size;
 751 |     slab_size = cache->slab_size - (optimal_embed
 752 |                                     ? sizeof(struct kmem_slab)
 753 |                                     : 0);
 754 |     cache->bufs_per_slab = slab_size / buf_size;
 755 |     cache->color_max = slab_size % buf_size;
 756 |
 757 |     if (cache->color_max >= PAGE_SIZE)
 758 |         cache->color_max = PAGE_SIZE - 1;
 759 |
 760 |     if (optimal_embed) {
 761 |         if (cache->slab_size == PAGE_SIZE)
 762 |             cache->flags |= KMEM_CF_DIRECT;
 763 |     } else {
 764 |         cache->flags |= KMEM_CF_SLAB_EXTERNAL;
 765 |     }
 766 | }
 767 |
 768 | void kmem_cache_init(struct kmem_cache *cache, const char *name,
 769 |                      size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
 770 |                      kmem_slab_alloc_fn_t slab_alloc_fn,
 771 |                      kmem_slab_free_fn_t slab_free_fn, int flags)
 772 | {
 773 | #if SLAB_USE_CPU_POOLS
 774 |     struct kmem_cpu_pool_type *cpu_pool_type;
 775 |     size_t i;
 776 | #endif /* SLAB_USE_CPU_POOLS */
 777 |     size_t buf_size;
 778 |
 779 | #if SLAB_VERIFY
 780 |     cache->flags = KMEM_CF_VERIFY;
 781 | #else /* SLAB_VERIFY */
 782 |     cache->flags = 0;
 783 | #endif /* SLAB_VERIFY */
 784 |
 785 |     if (flags & KMEM_CACHE_NOCPUPOOL)
 786 |         cache->flags |= KMEM_CF_NO_CPU_POOL;
 787 |
 788 |     if (flags & KMEM_CACHE_NORECLAIM) {
 789 |         assert(slab_free_fn == NULL);
 790 |         flags |= KMEM_CACHE_NOOFFSLAB;
 791 |         cache->flags |= KMEM_CF_NO_RECLAIM;
 792 |     }
 793 |
 794 |     if (flags & KMEM_CACHE_VERIFY)
 795 |         cache->flags |= KMEM_CF_VERIFY;
 796 |
 797 |     if (align < KMEM_ALIGN_MIN)
 798 |         align = KMEM_ALIGN_MIN;
 799 |
 800 |     assert(obj_size > 0);
 801 |     assert(ISP2(align));
 802 |     assert(align < PAGE_SIZE);
 803 |
 804 |     buf_size = P2ROUND(obj_size, align);
 805 |
 806 |     simple_lock_init(&cache->lock);
 807 |     list_node_init(&cache->node);
 808 |     list_init(&cache->partial_slabs);
 809 |     list_init(&cache->free_slabs);
 810 |     rbtree_init(&cache->active_slabs);
 811 |     cache->obj_size = obj_size;
 812 |     cache->align = align;
 813 |     cache->buf_size = buf_size;
 814 |     cache->bufctl_dist = buf_size - sizeof(union kmem_bufctl);
 815 |     cache->color = 0;
 816 |     cache->nr_objs = 0;
 817 |     cache->nr_bufs = 0;
 818 |     cache->nr_slabs = 0;
 819 |     cache->nr_free_slabs = 0;
 820 |     cache->ctor = ctor;
 821 |     cache->slab_alloc_fn = slab_alloc_fn;
 822 |     cache->slab_free_fn = slab_free_fn;
 823 |     strncpy(cache->name, name, sizeof(cache->name));
 824 |     cache->name[sizeof(cache->name) - 1] = '\0';
 825 |     cache->buftag_dist = 0;
 826 |     cache->redzone_pad = 0;
 827 |
 828 |     if (cache->flags & KMEM_CF_VERIFY) {
 829 |         cache->bufctl_dist = buf_size;
 830 |         cache->buftag_dist = cache->bufctl_dist + sizeof(union kmem_bufctl);
 831 |         cache->redzone_pad = cache->bufctl_dist - cache->obj_size;
 832 |         buf_size += sizeof(union kmem_bufctl) + sizeof(struct kmem_buftag);
 833 |         buf_size = P2ROUND(buf_size, align);
 834 |         cache->buf_size = buf_size;
 835 |     }
 836 |
 837 |     kmem_cache_compute_sizes(cache, flags);
 838 |
 839 | #if SLAB_USE_CPU_POOLS
 840 |     for (cpu_pool_type = kmem_cpu_pool_types;
 841 |          buf_size <= cpu_pool_type->buf_size;
 842 |          cpu_pool_type++);
 843 |
 844 |     cache->cpu_pool_type = cpu_pool_type;
 845 |
 846 |     for (i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++)
 847 |         kmem_cpu_pool_init(&cache->cpu_pools[i], cache);
 848 | #endif /* SLAB_USE_CPU_POOLS */
 849 |
 850 |     simple_lock(&kmem_cache_list_lock);
 851 |     list_insert_tail(&kmem_cache_list, &cache->node);
 852 |     kmem_nr_caches++;
 853 |     simple_unlock(&kmem_cache_list_lock);
 854 | }
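     | /*
     |  * Usage sketch for the cache API (hypothetical client code; struct foo
     |  * and foo_ctor are illustrative names, not part of this file):
     |  *
     |  *     static struct kmem_cache foo_cache;
     |  *
     |  *     kmem_cache_init(&foo_cache, "foo", sizeof(struct foo), 0,
     |  *                     foo_ctor, NULL, NULL, 0);
     |  *     struct foo *f = (struct foo *)kmem_cache_alloc(&foo_cache);
     |  *     ...
     |  *     kmem_cache_free(&foo_cache, (vm_offset_t)f);
     |  */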
 855 |
 856 | static inline int kmem_cache_empty(struct kmem_cache *cache)
 857 | {
 858 |     return cache->nr_objs == cache->nr_bufs;
 859 | }
 860 |
 861 | static int kmem_cache_grow(struct kmem_cache *cache)
 862 | {
 863 |     struct kmem_slab *slab;
 864 |     size_t color;
 865 |     int empty;
 866 |
 867 |     simple_lock(&cache->lock);
 868 |
 869 |     if (!kmem_cache_empty(cache)) {
 870 |         simple_unlock(&cache->lock);
 871 |         return 1;
 872 |     }
 873 |
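     |     /*
     |      * Grab the current color and advance it by the alignment for the
     |      * next slab, wrapping at color_max, so successive slabs stagger
     |      * their buffers (see kmem_slab_create()).
     |      */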
 874 |     color = cache->color;
 875 |     cache->color += cache->align;
 876 |
 877 |     if (cache->color > cache->color_max)
 878 |         cache->color = 0;
 879 |
 880 |     simple_unlock(&cache->lock);
 881 |
 882 |     slab = kmem_slab_create(cache, color);
 883 |
 884 |     simple_lock(&cache->lock);
 885 |
 886 |     if (slab != NULL) {
 887 |         list_insert_head(&cache->free_slabs, &slab->list_node);
 888 |         cache->nr_bufs += cache->bufs_per_slab;
 889 |         cache->nr_slabs++;
 890 |         cache->nr_free_slabs++;
 891 |     }
 892 |
 893 |     /*
 894 |      * Even if our slab creation failed, another thread might have succeeded
 895 |      * in growing the cache.
 896 |      */
 897 |     empty = kmem_cache_empty(cache);
 898 |
 899 |     simple_unlock(&cache->lock);
 900 |
 901 |     return !empty;
 902 | }
 903 |
 904 | static void kmem_cache_reap(struct kmem_cache *cache)
 905 | {
 906 |     struct kmem_slab *slab;
 907 |     struct list dead_slabs;
 908 |     unsigned long nr_free_slabs;
 909 |
 910 |     if (cache->flags & KMEM_CF_NO_RECLAIM)
 911 |         return;
 912 |
 913 |     simple_lock(&cache->lock);
 914 |     list_set_head(&dead_slabs, &cache->free_slabs);
 915 |     list_init(&cache->free_slabs);
 916 |     nr_free_slabs = cache->nr_free_slabs;
 917 |     cache->nr_bufs -= cache->bufs_per_slab * nr_free_slabs;
 918 |     cache->nr_slabs -= nr_free_slabs;
 919 |     cache->nr_free_slabs = 0;
 920 |     simple_unlock(&cache->lock);
 921 |
 922 |     while (!list_empty(&dead_slabs)) {
 923 |         slab = list_first_entry(&dead_slabs, struct kmem_slab, list_node);
 924 |         list_remove(&slab->list_node);
 925 |         kmem_slab_destroy(slab, cache);
 926 |         nr_free_slabs--;
 927 |     }
 928 |
 929 |     assert(nr_free_slabs == 0);
 930 | }
 931 |
 932 | /*
 933 |  * Allocate a raw (unconstructed) buffer from the slab layer of a cache.
 934 |  *
 935 |  * The cache must be locked before calling this function.
 936 |  */
 937 | static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
 938 | {
 939 |     struct kmem_slab *slab;
 940 |     union kmem_bufctl *bufctl;
 941 |
 942 |     if (!list_empty(&cache->partial_slabs))
 943 |         slab = list_first_entry(&cache->partial_slabs, struct kmem_slab,
 944 |                                 list_node);
 945 |     else if (!list_empty(&cache->free_slabs))
 946 |         slab = list_first_entry(&cache->free_slabs, struct kmem_slab,
 947 |                                 list_node);
 948 |     else
 949 |         return NULL;
 950 |
 951 |     bufctl = slab->first_free;
 952 |     assert(bufctl != NULL);
 953 |     slab->first_free = bufctl->next;
 954 |     slab->nr_refs++;
 955 |     cache->nr_objs++;
 956 |
 957 |     if (slab->nr_refs == cache->bufs_per_slab) {
 958 |         /* The slab has become complete */
 959 |         list_remove(&slab->list_node);
 960 |
 961 |         if (slab->nr_refs == 1)
 962 |             cache->nr_free_slabs--;
 963 |     } else if (slab->nr_refs == 1) {
 964 |         /*
 965 |          * The slab has become partial. Insert the new slab at the end of
 966 |          * the list to reduce fragmentation.
 967 |          */
 968 |         list_remove(&slab->list_node);
 969 |         list_insert_tail(&cache->partial_slabs, &slab->list_node);
 970 |         cache->nr_free_slabs--;
 971 |     }
 972 |
 973 |     if ((slab->nr_refs == 1) && kmem_slab_use_tree(cache->flags))
 974 |         rbtree_insert(&cache->active_slabs, &slab->tree_node,
 975 |                       kmem_slab_cmp_insert);
 976 |
 977 |     return kmem_bufctl_to_buf(bufctl, cache);
 978 | }
 979 |
 980 | /*
 981 |  * Release a buffer to the slab layer of a cache.
 982 |  *
 983 |  * The cache must be locked before calling this function.
 984 |  */
 985 | static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
 986 | {
 987 |     struct kmem_slab *slab;
 988 |     union kmem_bufctl *bufctl;
 989 |
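     |     /*
     |      * With KMEM_CF_DIRECT, slabs are exactly one page and the slab data
     |      * is embedded at the end of that page, so the slab is recovered
     |      * with pure address arithmetic instead of a tree lookup.
     |      */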
 990 |     if (cache->flags & KMEM_CF_DIRECT) {
 991 |         assert(cache->slab_size == PAGE_SIZE);
 992 |         slab = (struct kmem_slab *)P2END((unsigned long)buf, cache->slab_size)
 993 |                - 1;
 994 |     } else {
 995 |         struct rbtree_node *node;
 996 |
 997 |         node = rbtree_lookup_nearest(&cache->active_slabs, buf,
 998 |                                      kmem_slab_cmp_lookup, RBTREE_LEFT);
 999 |         assert(node != NULL);
1000 |         slab = rbtree_entry(node, struct kmem_slab, tree_node);
1001 |         assert((unsigned long)buf < (P2ALIGN((unsigned long)slab->addr
1002 |                + cache->slab_size, PAGE_SIZE)));
1003 |     }
1004 |
1005 |     assert(slab->nr_refs >= 1);
1006 |     assert(slab->nr_refs <= cache->bufs_per_slab);
1007 |     bufctl = kmem_buf_to_bufctl(buf, cache);
1008 |     bufctl->next = slab->first_free;
1009 |     slab->first_free = bufctl;
1010 |     slab->nr_refs--;
1011 |     cache->nr_objs--;
1012 |
1013 |     if (slab->nr_refs == 0) {
1014 |         /* The slab has become free */
1015 |
1016 |         if (kmem_slab_use_tree(cache->flags))
1017 |             rbtree_remove(&cache->active_slabs, &slab->tree_node);
1018 |
1019 |         if (cache->bufs_per_slab > 1)
1020 |             list_remove(&slab->list_node);
1021 |
1022 |         list_insert_head(&cache->free_slabs, &slab->list_node);
1023 |         cache->nr_free_slabs++;
1024 |     } else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
1025 |         /* The slab has become partial */
1026 |         list_insert_head(&cache->partial_slabs, &slab->list_node);
1027 |     }
1028 | }
1029 |
1030 | static void kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf,
1031 |                                     int construct)
1032 | {
1033 |     struct kmem_buftag *buftag;
1034 |     union kmem_bufctl *bufctl;
1035 |     void *addr;
1036 |
1037 |     buftag = kmem_buf_to_buftag(buf, cache);
1038 |
1039 |     if (buftag->state != KMEM_BUFTAG_FREE)
1040 |         kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
1041 |
1042 |     addr = kmem_buf_verify_fill(buf, KMEM_FREE_PATTERN, KMEM_UNINIT_PATTERN,
1043 |                                 cache->bufctl_dist);
1044 |
1045 |     if (addr != NULL)
1046 |         kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr);
1047 |
1048 |     addr = buf + cache->obj_size;
1049 |     memset(addr, KMEM_REDZONE_BYTE, cache->redzone_pad);
1050 |
1051 |     bufctl = kmem_buf_to_bufctl(buf, cache);
1052 |     bufctl->redzone = KMEM_REDZONE_WORD;
1053 |     buftag->state = KMEM_BUFTAG_ALLOC;
1054 |
1055 |     if (construct && (cache->ctor != NULL))
1056 |         cache->ctor(buf);
1057 | }
1058 |
1059 | vm_offset_t kmem_cache_alloc(struct kmem_cache *cache)
1060 | {
1061 |     int filled;
1062 |     void *buf;
1063 |
1064 | #if SLAB_USE_CPU_POOLS
1065 |     struct kmem_cpu_pool *cpu_pool;
1066 |
1067 |     cpu_pool = kmem_cpu_pool_get(cache);
1068 |
1069 |     if (cpu_pool->flags & KMEM_CF_NO_CPU_POOL)
1070 |         goto slab_alloc;
1071 |
1072 |     simple_lock(&cpu_pool->lock);
1073 |
1074 | fast_alloc:
1075 |     if (likely(cpu_pool->nr_objs > 0)) {
1076 |         buf = kmem_cpu_pool_pop(cpu_pool);
1077 |         simple_unlock(&cpu_pool->lock);
1078 |
1079 |         if (cpu_pool->flags & KMEM_CF_VERIFY)
1080 |             kmem_cache_alloc_verify(cache, buf, KMEM_AV_CONSTRUCT);
1081 |
1082 |         return (vm_offset_t)buf;
1083 |     }
1084 |
1085 |     if (cpu_pool->array != NULL) {
1086 |         filled = kmem_cpu_pool_fill(cpu_pool, cache);
1087 |
1088 |         if (!filled) {
1089 |             simple_unlock(&cpu_pool->lock);
1090 |
1091 |             filled = kmem_cache_grow(cache);
1092 |
1093 |             if (!filled)
1094 |                 return 0;
1095 |
1096 |             simple_lock(&cpu_pool->lock);
1097 |         }
1098 |
1099 |         goto fast_alloc;
1100 |     }
1101 |
1102 |     simple_unlock(&cpu_pool->lock);
1103 | #endif /* SLAB_USE_CPU_POOLS */
1104 |
1105 | slab_alloc:
1106 |     simple_lock(&cache->lock);
1107 |     buf = kmem_cache_alloc_from_slab(cache);
1108 |     simple_unlock(&cache->lock);
1109 |
1110 |     if (buf == NULL) {
1111 |         filled = kmem_cache_grow(cache);
1112 |
1113 |         if (!filled)
1114 |             return 0;
1115 |
1116 |         goto slab_alloc;
1117 |     }
1118 |
1119 |     if (cache->flags & KMEM_CF_VERIFY)
1120 |         kmem_cache_alloc_verify(cache, buf, KMEM_AV_NOCONSTRUCT);
1121 |
1122 |     if (cache->ctor != NULL)
1123 |         cache->ctor(buf);
1124 |
1125 |     return (vm_offset_t)buf;
1126 | }
1127 |
1128 | static void kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
1129 | {
1130 |     struct rbtree_node *node;
1131 |     struct kmem_buftag *buftag;
1132 |     struct kmem_slab *slab;
1133 |     union kmem_bufctl *bufctl;
1134 |     unsigned char *redzone_byte;
1135 |     unsigned long slabend;
1136 |
1137 |     simple_lock(&cache->lock);
1138 |     node = rbtree_lookup_nearest(&cache->active_slabs, buf,
1139 |                                  kmem_slab_cmp_lookup, RBTREE_LEFT);
1140 |     simple_unlock(&cache->lock);
1141 |
1142 |     if (node == NULL)
1143 |         kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
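     |     /*
     |      * kmem_cache_error() never returns (see the panic above), so node
     |      * is known to be non-NULL from here on; without that guarantee,
     |      * slab would be computed from a NULL node and the slab->addr access
     |      * at line 1146 would be the NULL dereference this report describes.
     |      */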
1144 | ||||||
1145 | slab = rbtree_entry(node, struct kmem_slab, tree_node)((struct kmem_slab *)((char *)node - __builtin_offsetof (struct kmem_slab, tree_node))); | |||||
1146 | slabend = P2ALIGN((unsigned long)slab->addr + cache->slab_size, PAGE_SIZE)(((unsigned long)slab->addr + cache->slab_size) & - ((1 << 12))); | |||||
| ||||||
1147 | ||||||
1148 | if ((unsigned long)buf >= slabend) | |||||
1149 | kmem_cache_error(cache, buf, KMEM_ERR_INVALID0, NULL((void *) 0)); | |||||
1150 | ||||||
1151 | if ((((unsigned long)buf - (unsigned long)slab->addr) % cache->buf_size) | |||||
1152 | != 0) | |||||
1153 | kmem_cache_error(cache, buf, KMEM_ERR_INVALID0, NULL((void *) 0)); | |||||
1154 | ||||||
1155 | /* | |||||
1156 | * As the buffer address is valid, accessing its buftag is safe. | |||||
1157 | */ | |||||
1158 | buftag = kmem_buf_to_buftag(buf, cache); | |||||
1159 | ||||||
1160 | if (buftag->state != KMEM_BUFTAG_ALLOC0xedc810a1UL) { | |||||
1161 | if (buftag->state == KMEM_BUFTAG_FREE0x0cb1eef4UL) | |||||
1162 | kmem_cache_error(cache, buf, KMEM_ERR_DOUBLEFREE1, NULL((void *) 0)); | |||||
1163 | else | |||||
1164 | kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG2, buftag); | |||||
1165 | } | |||||
1166 | ||||||
1167 | redzone_byte = buf + cache->obj_size; | |||||
1168 | bufctl = kmem_buf_to_bufctl(buf, cache); | |||||
1169 | ||||||
1170 | while (redzone_byte < (unsigned char *)bufctl) { | |||||
1171 | if (*redzone_byte != KMEM_REDZONE_BYTE0xbb) | |||||
1172 | kmem_cache_error(cache, buf, KMEM_ERR_REDZONE4, redzone_byte); | |||||
1173 | ||||||
1174 | redzone_byte++; | |||||
1175 | } | |||||
1176 | ||||||
1177 | if (bufctl->redzone != KMEM_REDZONE_WORD0xcefaedfeUL) { | |||||
1178 | unsigned long word; | |||||
1179 | ||||||
1180 | word = KMEM_REDZONE_WORD0xcefaedfeUL; | |||||
1181 | redzone_byte = kmem_buf_verify_bytes(&bufctl->redzone, &word, | |||||
1182 | sizeof(bufctl->redzone)); | |||||
1183 | kmem_cache_error(cache, buf, KMEM_ERR_REDZONE4, redzone_byte); | |||||
1184 | } | |||||
1185 | ||||||
1186 | kmem_buf_fill(buf, KMEM_FREE_PATTERN0xefbeaddeefbeaddeULL, cache->bufctl_dist); | |||||
1187 | buftag->state = KMEM_BUFTAG_FREE0x0cb1eef4UL; | |||||
1188 | } | |||||

void kmem_cache_free(struct kmem_cache *cache, vm_offset_t obj)
{
#if SLAB_USE_CPU_POOLS
    struct kmem_cpu_pool *cpu_pool;
    void **array;

    cpu_pool = kmem_cpu_pool_get(cache);

    if (cpu_pool->flags & KMEM_CF_VERIFY) {
#else /* SLAB_USE_CPU_POOLS */
    if (cache->flags & KMEM_CF_VERIFY) {
#endif /* SLAB_USE_CPU_POOLS */
        kmem_cache_free_verify(cache, (void *)obj);
    }

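    /*
     * When CPU pools are enabled (SLAB_USE_CPU_POOLS, which is 0 in
     * this build), frees go through a per-processor array of objects,
     * in the spirit of Bonwick's magazine layer, so that the common
     * path avoids taking the cache lock.
     */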
#if SLAB_USE_CPU_POOLS
    if (cpu_pool->flags & KMEM_CF_NO_CPU_POOL)
        goto slab_free;

    simple_lock(&cpu_pool->lock);

fast_free:
    if (likely(cpu_pool->nr_objs < cpu_pool->size)) {
        kmem_cpu_pool_push(cpu_pool, (void *)obj);
        simple_unlock(&cpu_pool->lock);
        return;
    }

    if (cpu_pool->array != NULL) {
        kmem_cpu_pool_drain(cpu_pool, cache);
        goto fast_free;
    }

    simple_unlock(&cpu_pool->lock);

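    /*
     * The array backing the pool is allocated with the pool lock
     * dropped, presumably because kmem_cache_alloc() may itself block
     * or take locks; the pool state is re-checked once the lock is
     * retaken.
     */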
    array = (void *)kmem_cache_alloc(cache->cpu_pool_type->array_cache);

    if (array != NULL) {
        simple_lock(&cpu_pool->lock);

        /*
         * Another thread may have built the CPU pool while the lock was
         * dropped.
         */
        if (cpu_pool->array != NULL) {
            simple_unlock(&cpu_pool->lock);
            kmem_cache_free(cache->cpu_pool_type->array_cache,
                            (vm_offset_t)array);
            simple_lock(&cpu_pool->lock);
            goto fast_free;
        }

        kmem_cpu_pool_build(cpu_pool, cache, array);
        goto fast_free;
    }

slab_free:
#endif /* SLAB_USE_CPU_POOLS */

    simple_lock(&cache->lock);
    kmem_cache_free_to_slab(cache, (void *)obj);
    simple_unlock(&cache->lock);
}

void slab_collect(void)
{
    struct kmem_cache *cache;

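    /*
     * Rate-limit reaping: at most one pass every KMEM_GC_INTERVAL
     * ticks, i.e. 5 * hz, roughly five seconds.
     */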
    if (elapsed_ticks <= (kmem_gc_last_tick + KMEM_GC_INTERVAL))
        return;

    kmem_gc_last_tick = elapsed_ticks;

    simple_lock(&kmem_cache_list_lock);

    list_for_each_entry(&kmem_cache_list, cache, node)
        kmem_cache_reap(cache);

    simple_unlock(&kmem_cache_list_lock);
}

void slab_bootstrap(void)
{
    /* Make sure a bufctl can always be stored in a buffer */
    assert(sizeof(union kmem_bufctl) <= KMEM_ALIGN_MIN);

    list_init(&kmem_cache_list);
    simple_lock_init(&kmem_cache_list_lock);
}

void slab_init(void)
{
    vm_offset_t min, max;

#if SLAB_USE_CPU_POOLS
    struct kmem_cpu_pool_type *cpu_pool_type;
    char name[KMEM_CACHE_NAME_SIZE];
    size_t i, size;
#endif /* SLAB_USE_CPU_POOLS */

    kmem_submap(kmem_map, kernel_map, &min, &max, KMEM_MAP_SIZE, FALSE);

#if SLAB_USE_CPU_POOLS
    for (i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
        cpu_pool_type = &kmem_cpu_pool_types[i];
        cpu_pool_type->array_cache = &kmem_cpu_array_caches[i];
        sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
        size = sizeof(void *) * cpu_pool_type->array_size;
        kmem_cache_init(cpu_pool_type->array_cache, name, size,
                        cpu_pool_type->array_align, NULL, NULL, NULL, 0);
    }
#endif /* SLAB_USE_CPU_POOLS */

    /*
     * Prevent off-slab data for the slab cache itself, to avoid
     * infinite recursion (the cache of slab descriptors would
     * otherwise need slab descriptors to describe its own slabs).
     */
    kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
                    0, NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB);
}

static vm_offset_t kalloc_pagealloc(vm_size_t size)
{
    vm_offset_t addr;
    kern_return_t kr;

    kr = kmem_alloc_wired(kmem_map, &addr, size);

    if (kr != KERN_SUCCESS)
        return 0;

    return addr;
}

static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size)
{
    kmem_free(kmem_map, ptr, size);
}

void kalloc_init(void)
{
    char name[KMEM_CACHE_NAME_SIZE];
    size_t i, size;

    size = 1 << KALLOC_FIRST_SHIFT;
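
    /*
     * The general purpose caches cover power-of-2 sizes starting at
     * 1 << KALLOC_FIRST_SHIFT (32 bytes here), each cache holding
     * objects twice as large as the previous one.
     */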
    for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
        sprintf(name, "kalloc_%lu", size);
        kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL,
                        kalloc_pagealloc, kalloc_pagefree, 0);
        size <<= 1;
    }
}

/*
 * Return the kalloc cache index matching the given allocation size, which
 * must be strictly greater than 0.
 */
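/*
 * For instance, with 64-bit longs: kalloc_get_index(32) computes
 * (32 - 1) >> 5 == 0 and returns index 0 (the 32-byte cache), while
 * kalloc_get_index(33) computes (33 - 1) >> 5 == 1 and returns
 * 64 - __builtin_clzl(1) == 1, selecting the 64-byte cache.
 */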
static inline size_t kalloc_get_index(unsigned long size)
{
    assert(size != 0);

    size = (size - 1) >> KALLOC_FIRST_SHIFT;

    if (size == 0)
        return 0;
    else
        return (sizeof(long) * 8) - __builtin_clzl(size);
}

static void kalloc_verify(struct kmem_cache *cache, void *buf, size_t size)
{
    size_t redzone_size;
    void *redzone;

    assert(size <= cache->obj_size);

    redzone = buf + size;
    redzone_size = cache->obj_size - size;
    memset(redzone, KMEM_REDZONE_BYTE, redzone_size);
}
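
/*
 * The slack between the requested size and the cache object size is
 * filled with redzone bytes here at allocation time; kfree_verify()
 * below checks that they are still intact when the buffer is released,
 * catching writes past the requested size.
 */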

vm_offset_t kalloc(vm_size_t size)
{
    size_t index;
    void *buf;

    if (size == 0)
        return 0;

    index = kalloc_get_index(size);

    if (index < ARRAY_SIZE(kalloc_caches)) {
        struct kmem_cache *cache;

        cache = &kalloc_caches[index];
        buf = (void *)kmem_cache_alloc(cache);

        if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY))
            kalloc_verify(cache, buf, size);
    } else
        buf = (void *)kalloc_pagealloc(size);

    return (vm_offset_t)buf;
}

static void kfree_verify(struct kmem_cache *cache, void *buf, size_t size)
{
    unsigned char *redzone_byte, *redzone_end;

    assert(size <= cache->obj_size);

    redzone_byte = buf + size;
    redzone_end = buf + cache->obj_size;

    while (redzone_byte < redzone_end) {
        if (*redzone_byte != KMEM_REDZONE_BYTE)
            kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);

        redzone_byte++;
    }
}

void kfree(vm_offset_t data, vm_size_t size)
{
    size_t index;

    if ((data == 0) || (size == 0))
        return;

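    /*
     * The cache is recovered from the caller-supplied size, so the size
     * passed to kfree() must map to the same cache (or the same page
     * allocation) as the one used by the original kalloc() call.
     */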
    index = kalloc_get_index(size);

    if (index < ARRAY_SIZE(kalloc_caches)) {
        struct kmem_cache *cache;

        cache = &kalloc_caches[index];

        if (cache->flags & KMEM_CF_VERIFY)
            kfree_verify(cache, (void *)data, size);

        kmem_cache_free(cache, data);
    } else {
        kalloc_pagefree(data, size);
    }
}

void slab_info(void)
{
    struct kmem_cache *cache;
    vm_size_t mem_usage, mem_reclaimable;

    printf("cache                  obj slab  bufs   objs   bufs "
           "   total reclaimable\n"
           "name                  size size /slab  usage  count "
           "  memory      memory\n");
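
    /*
     * The memory columns are reported in kilobytes, hence the right
     * shifts by 10 below.
     */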

    simple_lock(&kmem_cache_list_lock);

    list_for_each_entry(&kmem_cache_list, cache, node) {
        simple_lock(&cache->lock);

        mem_usage = (cache->nr_slabs * cache->slab_size) >> 10;
        mem_reclaimable = (cache->nr_free_slabs * cache->slab_size) >> 10;

        printf("%-19s %6lu %3luk %4lu %6lu %6lu %7uk %10uk\n",
               cache->name, cache->obj_size, cache->slab_size >> 10,
               cache->bufs_per_slab, cache->nr_objs, cache->nr_bufs,
               mem_usage, mem_reclaimable);

        simple_unlock(&cache->lock);
    }

    simple_unlock(&kmem_cache_list_lock);
}

#if MACH_DEBUG
kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
                             unsigned int *infoCntp)
{
    struct kmem_cache *cache;
    cache_info_t *info;
    unsigned int i, nr_caches;
    vm_size_t info_size = 0;
    kern_return_t kr;

    if (host == HOST_NULL)
        return KERN_INVALID_HOST;

    /*
     * Assume the cache list is unaltered once the kernel is ready.
     */

    simple_lock(&kmem_cache_list_lock);
    nr_caches = kmem_nr_caches;
    simple_unlock(&kmem_cache_list_lock);

    if (nr_caches <= *infoCntp)
        info = *infop;
    else {
        vm_offset_t info_addr;

        info_size = round_page(nr_caches * sizeof(*info));
        kr = kmem_alloc_pageable(ipc_kernel_map, &info_addr, info_size);

        if (kr != KERN_SUCCESS)
            return kr;

        info = (cache_info_t *)info_addr;
    }
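
    /*
     * info now refers either to the caller-supplied buffer (when it is
     * large enough) or to freshly allocated pageable kernel memory
     * that is handed back out of line below.
     */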

    if (info == NULL)
        return KERN_RESOURCE_SHORTAGE;

    i = 0;

    list_for_each_entry(&kmem_cache_list, cache, node) {
        simple_lock(&cache->lock);
        info[i].flags = ((cache->flags & KMEM_CF_NO_CPU_POOL)
                         ? CACHE_FLAGS_NO_CPU_POOL : 0)
                        | ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
                           ? CACHE_FLAGS_SLAB_EXTERNAL : 0)
                        | ((cache->flags & KMEM_CF_NO_RECLAIM)
                           ? CACHE_FLAGS_NO_RECLAIM : 0)
                        | ((cache->flags & KMEM_CF_VERIFY)
                           ? CACHE_FLAGS_VERIFY : 0)
                        | ((cache->flags & KMEM_CF_DIRECT)
                           ? CACHE_FLAGS_DIRECT : 0);
#if SLAB_USE_CPU_POOLS
        info[i].cpu_pool_size = cache->cpu_pool_type->array_size;
#else /* SLAB_USE_CPU_POOLS */
        info[i].cpu_pool_size = 0;
#endif /* SLAB_USE_CPU_POOLS */
        info[i].obj_size = cache->obj_size;
        info[i].align = cache->align;
        info[i].buf_size = cache->buf_size;
        info[i].slab_size = cache->slab_size;
        info[i].bufs_per_slab = cache->bufs_per_slab;
        info[i].nr_objs = cache->nr_objs;
        info[i].nr_bufs = cache->nr_bufs;
        info[i].nr_slabs = cache->nr_slabs;
        info[i].nr_free_slabs = cache->nr_free_slabs;
        strncpy(info[i].name, cache->name, sizeof(info[i].name));
        info[i].name[sizeof(info[i].name) - 1] = '\0';
        simple_unlock(&cache->lock);

        i++;
    }

    if (info != *infop) {
        vm_map_copy_t copy;
        vm_size_t used;

        used = nr_caches * sizeof(*info);

        if (used != info_size)
            memset((char *)info + used, 0, info_size - used);

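        /*
         * With src_destroy (the fourth argument) set to TRUE,
         * vm_map_copyin() consumes the source region, so the pageable
         * buffer allocated above ends up owned by the copy object
         * returned to the caller.
         */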
        kr = vm_map_copyin(ipc_kernel_map, (vm_offset_t)info, used, TRUE,
                           &copy);

        assert(kr == KERN_SUCCESS);
        *infop = (cache_info_t *)copy;
    }

    *infoCntp = nr_caches;

    return KERN_SUCCESS;
}
#endif /* MACH_DEBUG */