Diffstat (limited to 'kern')
-rw-r--r--  kern/act.c        |   6
-rw-r--r--  kern/kalloc.h     |   2
-rw-r--r--  kern/processor.c  |   6
-rw-r--r--  kern/rdxtree.c    |   6
-rw-r--r--  kern/slab.c       | 173
-rw-r--r--  kern/slab.h       |  21
-rw-r--r--  kern/slab_i.h     |   5
-rw-r--r--  kern/task.c       |   6
-rw-r--r--  kern/thread.c     |   4
9 files changed, 77 insertions(+), 152 deletions(-)
diff --git a/kern/act.c b/kern/act.c
@@ -67,8 +67,10 @@ void global_act_init(void)
 {
 #ifndef ACT_STATIC_KLUDGE
-    kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
-                    NULL, NULL, NULL, 0);
+    kmem_cache_init (&act_cache,
+                     "Act",
+                     sizeof(struct Act), 0,
+                     NULL, 0);
 #else
     int i;
diff --git a/kern/kalloc.h b/kern/kalloc.h
index 004e3a6..c378873 100644
--- a/kern/kalloc.h
+++ b/kern/kalloc.h
@@ -33,6 +33,4 @@ extern vm_offset_t kalloc (vm_size_t size);
 extern void kfree (vm_offset_t data, vm_size_t size);
 
-extern void kalloc_init (void);
-
 #endif /* _KERN_KALLOC_H_ */
diff --git a/kern/processor.c b/kern/processor.c
index bc70dde..f2f9a2d 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -108,8 +108,10 @@ void pset_sys_init(void)
     /*
      * Allocate the cache for processor sets.
      */
-    kmem_cache_init(&pset_cache, "processor_set",
-                    sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
+    kmem_cache_init (&pset_cache,
+                     "processor_set",
+                     sizeof(struct processor_set), 0,
+                     NULL, 0);
 
     /*
      * Give each processor a control port.
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
index 78868b1..83ba750 100644
--- a/kern/rdxtree.c
+++ b/kern/rdxtree.c
@@ -123,8 +123,10 @@ static struct kmem_cache rdxtree_node_cache;
 void
 rdxtree_cache_init(void)
 {
-    kmem_cache_init(&rdxtree_node_cache, "rdxtree_node",
-                    sizeof(struct rdxtree_node), 0, NULL, NULL, NULL, 0);
+    kmem_cache_init (&rdxtree_node_cache,
+                     "rdxtree_node",
+                     sizeof(struct rdxtree_node), 0,
+                     NULL, 0);
 }
 
 #ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
diff --git a/kern/slab.c b/kern/slab.c
index ca8b44a..42af6a3 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -53,14 +53,11 @@
  *
  * The per-cache self-scaling hash table for buffer-to-bufctl conversion,
  * described in 3.2.3 "Slab Layout for Large Objects", has been replaced with
- * a constant time buffer-to-slab lookup that relies on the VM system. When
- * slabs are created, their backing virtual pages are mapped to physical pages
- * that are never aliased by other virtual addresses (unless explicitely by
- * other kernel code). The lookup operation provides the associated physical
- * page descriptor which can store data private to this allocator. The main
- * drawback of this method is that it needs to walk the low level page tables,
- * but it's expected that these have already been cached by the CPU due to
- * prior access by the allocator user, depending on the hardware properties.
+ * a constant time buffer-to-slab lookup that relies on the VM system. Slabs
+ * are allocated from the direct mapping of physical memory, which enables
+ * the retrieval of physical addresses backing slabs with a simple shift.
+ * Physical addresses are then used to find page descriptors, which store
+ * data private to this allocator.
  *
  * This implementation uses per-cpu pools of objects, which service most
  * allocation requests. These pools act as caches (but are named differently
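The rewritten header comment above is the heart of this patch: slab buffers now come from the kernel's direct physical mapping, so resolving a buffer address to its page descriptor (and from there to its owning slab, via slab_priv) is plain arithmetic instead of a page-table walk. Below is a minimal user-space sketch of that lookup; the direct-map base, page size, flat descriptor array, and helper names are illustrative assumptions, not gnumach's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define DIRECTMAP_BASE 0xc0000000UL /* hypothetical direct-map base address */
#define PAGE_SHIFT     12           /* 4 KiB pages */

struct vm_page { void *slab_priv; };    /* stand-in for the real descriptor */

static struct vm_page page_table[1024]; /* descriptors for the first 4 MiB */

/* In a direct mapping, virtual-to-physical is a constant-time subtraction
 * (the role played by vm_page_direct_pa() in this patch)... */
static inline uintptr_t direct_pa(uintptr_t va)
{
    return va - DIRECTMAP_BASE;
}

/* ...and the physical address indexes the descriptor array with a shift
 * (the role of vm_page_lookup_pa()), replacing the page-table walk done
 * by the old vm_kmem_lookup_page(). */
static inline struct vm_page *lookup_page(uintptr_t va)
{
    return &page_table[direct_pa(va) >> PAGE_SHIFT];
}

int main(void)
{
    uintptr_t buf = DIRECTMAP_BASE + (5UL << PAGE_SHIFT) + 128; /* in page 5 */

    printf("descriptor index: %td\n", lookup_page(buf) - page_table); /* 5 */
    return 0;
}

The price is that slabs must be satisfied from direct-mapped physical memory (hence VM_PAGE_SEL_DIRECTMAP in the hunks below), which in turn is what makes the per-cache slab_alloc_fn/slab_free_fn backends unnecessary.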
@@ -81,6 +78,7 @@
 #include <mach/vm_param.h>
 #include <mach/machine/vm_types.h>
 #include <vm/vm_kern.h>
+#include <vm/vm_page.h>
 #include <vm/vm_types.h>
 #include <sys/types.h>
 
@@ -135,9 +133,9 @@
 #define KMEM_CPU_POOL_TRANSFER_RATIO 2
 
 /*
- * Size of the VM submap from which default backend functions allocate.
+ * Logarithm of the size of the smallest general cache.
  */
-#define KMEM_MAP_SIZE (96 * 1024 * 1024)
+#define KMEM_CACHES_FIRST_ORDER 5
 
 /*
  * Shift for the first kalloc cache size.
@@ -204,12 +202,6 @@ static unsigned int kmem_nr_caches;
 static simple_lock_data_t __attribute__((used)) kmem_cache_list_lock;
 
 /*
- * VM submap for slab caches.
- */
-static struct vm_map kmem_map_store;
-vm_map_t kmem_map = &kmem_map_store;
-
-/*
  * Time of the last memory reclaim, in clock ticks.
  */
 static unsigned long kmem_gc_last_tick;
@@ -310,30 +302,12 @@ static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
     return (void *)bufctl - cache->bufctl_dist;
 }
 
-static vm_offset_t kmem_pagealloc(vm_size_t size)
-{
-    vm_offset_t addr;
-    kern_return_t kr;
-
-    kr = kmem_alloc_wired(kmem_map, &addr, size);
-
-    if (kr != KERN_SUCCESS)
-        return 0;
-
-    return addr;
-}
-
-static void kmem_pagefree(vm_offset_t ptr, vm_size_t size)
-{
-    kmem_free(kmem_map, ptr, size);
-}
-
 static void kmem_slab_create_verify(struct kmem_slab *slab,
                                     struct kmem_cache *cache)
 {
     struct kmem_buftag *buftag;
-    size_t buf_size;
     unsigned long buffers;
+    size_t buf_size;
     void *buf;
 
     buf_size = cache->buf_size;
@@ -356,29 +330,26 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
                                            size_t color)
 {
+    struct vm_page *page;
     struct kmem_slab *slab;
     union kmem_bufctl *bufctl;
     size_t buf_size;
     unsigned long buffers;
     void *slab_buf;
 
-    if (cache->slab_alloc_fn == NULL)
-        slab_buf = (void *)kmem_pagealloc(cache->slab_size);
-    else
-        slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size);
+    page = vm_page_alloc_p(cache->slab_order, VM_PAGE_SEL_DIRECTMAP,
+                           VM_PAGE_KMEM);
 
-    if (slab_buf == NULL)
+    if (page == NULL)
         return NULL;
 
+    slab_buf = vm_page_direct_ptr(page);
+
     if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
         slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
 
         if (slab == NULL) {
-            if (cache->slab_free_fn == NULL)
-                kmem_pagefree((vm_offset_t)slab_buf, cache->slab_size);
-            else
-                cache->slab_free_fn((vm_offset_t)slab_buf, cache->slab_size);
-
+            vm_page_free_p(page, cache->slab_order);
             return NULL;
         }
     } else {
@@ -438,21 +409,12 @@ static void kmem_slab_destroy_verify(struct kmem_slab *slab,
  */
 static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
 {
-    vm_offset_t slab_buf;
-
     assert(slab->nr_refs == 0);
     assert(slab->first_free != NULL);
 
     if (cache->flags & KMEM_CF_VERIFY)
         kmem_slab_destroy_verify(slab, cache);
 
-    slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
-
-    if (cache->slab_free_fn == NULL)
-        kmem_pagefree(slab_buf, cache->slab_size);
-    else
-        cache->slab_free_fn(slab_buf, cache->slab_size);
-
     if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
         kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
 }
@@ -473,7 +435,7 @@ kmem_slab_vmref(struct kmem_slab *slab, size_t size)
     end = va + size;
 
     do {
-        page = vm_kmem_lookup_page(va);
+        page = vm_page_lookup_pa(vm_page_direct_pa(va));
         assert(page != NULL);
         assert(page->slab_priv == NULL);
         page->slab_priv = slab;
@@ -628,7 +590,8 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 {
     size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size = 0;
     size_t waste, waste_min;
-    int embed, optimal_embed = 0;
+    int embed, optimal_embed = optimal_embed;
+    unsigned int slab_order, optimal_order = optimal_order;
 
     buf_size = cache->buf_size;
 
@@ -640,7 +603,9 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 
     do {
         i++;
-        slab_size = P2ROUND(i * buf_size, PAGE_SIZE);
+
+        slab_order = vm_page_order(i * buf_size);
+        slab_size = PAGE_SIZE << slab_order;
         free_slab_size = slab_size;
 
         if (flags & KMEM_CACHE_NOOFFSLAB)
@@ -663,6 +628,7 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 
         if (waste <= waste_min) {
             waste_min = waste;
+            optimal_order = slab_order;
             optimal_size = slab_size;
             optimal_embed = embed;
         }
@@ -672,10 +638,10 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
     assert(optimal_size > 0);
     assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
 
+    cache->slab_order = optimal_order;
     cache->slab_size = optimal_size;
-    slab_size = cache->slab_size - (optimal_embed
-                ? sizeof(struct kmem_slab)
-                : 0);
+    slab_size = cache->slab_size
+                - (optimal_embed ? sizeof(struct kmem_slab) : 0);
     cache->bufs_per_slab = slab_size / buf_size;
     cache->color_max = slab_size % buf_size;
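A note on the sizing loop above: slabs are now power-of-two page blocks, so each candidate buffer count is rounded up to a page order rather than to a page multiple, and the winning order is recorded in the new slab_order field. A worked example of that rounding, assuming vm_page_order(size) returns the smallest order such that (PAGE_SIZE << order) >= size (an assumption consistent with how slab_size is derived above):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Assumed semantics of vm_page_order(): smallest order whose page block
 * (PAGE_SIZE << order) covers the requested size. */
static unsigned int page_order(unsigned long size)
{
    unsigned int order = 0;

    while ((PAGE_SIZE << order) < size)
        order++;

    return order;
}

int main(void)
{
    /* Sizing a slab for i = 3 buffers of 1500 bytes: 4500 bytes rounds
     * up to order 1, i.e. an 8 KiB (two-page) slab. */
    unsigned long size = 3 * 1500;
    unsigned int order = page_order(size);

    printf("slab_order = %u, slab_size = %lu\n", order, PAGE_SIZE << order);
    return 0;
}

kmem_cache_compute_sizes then picks the candidate that wastes the least memory, exactly as before; only the rounding granularity changed.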
@@ -690,10 +656,9 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
     }
 }
 
-void kmem_cache_init(struct kmem_cache *cache, const char *name,
-                     size_t obj_size, size_t align, kmem_ctor_fn_t ctor,
-                     kmem_slab_alloc_fn_t slab_alloc_fn,
-                     kmem_slab_free_fn_t slab_free_fn, int flags)
+void
+kmem_cache_init(struct kmem_cache *cache, const char *name, size_t obj_size,
+                size_t align, kmem_ctor_fn_t ctor, int flags)
 {
 #if SLAB_USE_CPU_POOLS
     struct kmem_cpu_pool_type *cpu_pool_type;
@@ -735,8 +700,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
     cache->nr_slabs = 0;
     cache->nr_free_slabs = 0;
     cache->ctor = ctor;
-    cache->slab_alloc_fn = slab_alloc_fn;
-    cache->slab_free_fn = slab_free_fn;
     strncpy(cache->name, name, sizeof(cache->name));
     cache->name[sizeof(cache->name) - 1] = '\0';
     cache->buftag_dist = 0;
@@ -905,7 +868,7 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
     } else {
         struct vm_page *page;
 
-        page = vm_kmem_lookup_page((unsigned long)buf);
+        page = vm_page_lookup_pa(vm_page_direct_pa((unsigned long)buf));
         assert(page != NULL);
         slab = page->slab_priv;
         assert(slab != NULL);
@@ -1194,16 +1157,13 @@ void slab_bootstrap(void)
 
 void slab_init(void)
 {
-    vm_offset_t min, max;
+    size_t i, size;
+    char name[KMEM_CACHE_NAME_SIZE];
 
 #if SLAB_USE_CPU_POOLS
     struct kmem_cpu_pool_type *cpu_pool_type;
-    char name[KMEM_CACHE_NAME_SIZE];
-    size_t i, size;
 #endif /* SLAB_USE_CPU_POOLS */
 
-    kmem_submap(kmem_map, kernel_map, &min, &max, KMEM_MAP_SIZE, FALSE);
-
 #if SLAB_USE_CPU_POOLS
     for (i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
         cpu_pool_type = &kmem_cpu_pool_types[i];
@@ -1211,7 +1171,7 @@ void slab_init(void)
         sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
         size = sizeof(void *) * cpu_pool_type->array_size;
         kmem_cache_init(cpu_pool_type->array_cache, name, size,
-                        cpu_pool_type->array_align, NULL, NULL, NULL, 0);
+                        cpu_pool_type->array_align, NULL, 0);
     }
 #endif /* SLAB_USE_CPU_POOLS */
@@ -1219,56 +1179,21 @@ void slab_init(void)
      * Prevent off slab data for the slab cache to avoid infinite recursion.
      */
     kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
-                    0, NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB);
-}
-
-static vm_offset_t kalloc_pagealloc(vm_size_t size)
-{
-    vm_offset_t addr;
-    kern_return_t kr;
-
-    kr = kmem_alloc_wired(kmem_map, &addr, size);
-
-    if (kr != KERN_SUCCESS)
-        return 0;
-
-    return addr;
-}
-
-static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size)
-{
-    kmem_free(kmem_map, ptr, size);
-}
-
-void kalloc_init(void)
-{
-    char name[KMEM_CACHE_NAME_SIZE];
-    size_t i, size;
+                    0, NULL, KMEM_CACHE_NOOFFSLAB);
 
-    size = 1 << KALLOC_FIRST_SHIFT;
+    size = 1 << KMEM_CACHES_FIRST_ORDER;
 
     for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
-        sprintf(name, "kalloc_%lu", size);
-        kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL,
-                        kalloc_pagealloc, kalloc_pagefree, 0);
+        sprintf(name, "kmem_%zu", size);
+        kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL, 0);
         size <<= 1;
     }
 }
 
-/*
- * Return the kalloc cache index matching the given allocation size, which
- * must be strictly greater than 0.
- */
-static inline size_t kalloc_get_index(unsigned long size)
+static inline size_t
+kalloc_get_index(unsigned long size)
 {
-    assert(size != 0);
-
-    size = (size - 1) >> KALLOC_FIRST_SHIFT;
-
-    if (size == 0)
-        return 0;
-    else
-        return (sizeof(long) * 8) - __builtin_clzl(size);
+    return iorder2(size) - KMEM_CACHES_FIRST_ORDER;
 }
 
 static void kalloc_verify(struct kmem_cache *cache, void *buf, size_t size)
@@ -1301,8 +1226,17 @@ vm_offset_t kalloc(vm_size_t size)
 
         if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY))
             kalloc_verify(cache, buf, size);
-    } else
-        buf = (void *)kalloc_pagealloc(size);
+    } else {
+        struct vm_page *page;
+
+        page = vm_page_alloc_p(vm_page_order(size), VM_PAGE_SEL_DIRECTMAP,
+                               VM_PAGE_KERNEL);
+
+        if (page == NULL)
+            return (vm_offset_t) 0;
+
+        buf = vm_page_direct_ptr(page);
+    }
 
     return (vm_offset_t)buf;
 }
@@ -1343,7 +1277,10 @@ void kfree(vm_offset_t data, vm_size_t size)
 
         kmem_cache_free(cache, data);
     } else {
-        kalloc_pagefree(data, size);
+        struct vm_page *page;
+
+        page = vm_page_lookup_pa(vm_page_direct_pa((unsigned long)data));
+        vm_page_free_p(page, vm_page_order(size));
     }
 }
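The kalloc cache index computation also collapses to a one-liner above. iorder2() itself is not part of this diff; assuming it computes the binary logarithm rounded up, the index arithmetic for the general caches (now named kmem_32, kmem_64, ... per KMEM_CACHES_FIRST_ORDER) behaves like the sketch below. The helper names are illustrative, and clamping of requests smaller than the first cache is omitted.

#include <stdio.h>

#define KMEM_CACHES_FIRST_ORDER 5 /* smallest general cache: 1 << 5 = 32 bytes */

/* Assumed semantics of iorder2(): binary logarithm rounded up. */
static unsigned long iorder2_sketch(unsigned long size)
{
    unsigned long order = 0;

    while ((1UL << order) < size)
        order++;

    return order;
}

static unsigned long cache_index(unsigned long size)
{
    return iorder2_sketch(size) - KMEM_CACHES_FIRST_ORDER;
}

int main(void)
{
    /* 17..32 bytes -> kmem_32 (index 0), 33..64 -> kmem_64 (index 1),
     * 200 bytes -> kmem_256 (index 3). Sizes below 32 would need the
     * clamping this sketch omits. */
    printf("%lu %lu %lu\n", cache_index(32), cache_index(33), cache_index(200));
    return 0;
}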
diff --git a/kern/slab.h b/kern/slab.h
index 52aa11b..6f5cc18 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 Free Software Foundation.
+ * Copyright (c) 2011-2015 Free Software Foundation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -69,14 +69,6 @@ struct kmem_cache;
  */
 typedef void (*kmem_ctor_fn_t)(void *);
 
-/*
- * Types for slab allocation/free functions.
- *
- * All addresses and sizes must be page-aligned.
- */
-typedef vm_offset_t (*kmem_slab_alloc_fn_t)(vm_size_t);
-typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
-
 #include <kern/slab_i.h>
 
 /*
@@ -86,11 +78,6 @@ typedef struct kmem_cache *kmem_cache_t;
 
 #define KMEM_CACHE_NULL ((kmem_cache_t) 0)
 
 /*
- * VM submap for slab allocations.
- */
-extern vm_map_t kmem_map;
-
-/*
  * Cache initialization flags.
  */
 #define KMEM_CACHE_NOCPUPOOL 0x1 /* Don't use the per-cpu pools */
@@ -99,14 +86,10 @@ extern vm_map_t kmem_map;
 
 /*
  * Initialize a cache.
- *
- * If a slab allocation/free function pointer is NULL, the default backend
- * (vm_kmem on the kernel map) is used for the allocation/free action.
  */
 void kmem_cache_init(struct kmem_cache *cache, const char *name,
                      size_t obj_size, size_t align, kmem_ctor_fn_t ctor,
-                     kmem_slab_alloc_fn_t slab_alloc_fn,
-                     kmem_slab_free_fn_t slab_free_fn, int flags);
+                     int flags);
 
 /*
  * Allocate an object from a cache.
diff --git a/kern/slab_i.h b/kern/slab_i.h
index 06da0ea..f8dc901 100644
--- a/kern/slab_i.h
+++ b/kern/slab_i.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2011, 2012, 2013 Richard Braun.
+ * Copyright (c) 2010-2014 Richard Braun.
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -197,6 +197,7 @@ struct kmem_cache {
     int flags;
     size_t bufctl_dist;     /* Distance from buffer to bufctl */
     size_t slab_size;
+    unsigned int slab_order;
     unsigned long bufs_per_slab;
     unsigned long nr_objs;  /* Number of allocated objects */
     unsigned long nr_free_slabs;
@@ -210,8 +211,6 @@ struct kmem_cache {
     size_t color_max;
     unsigned long nr_bufs;  /* Total number of buffers */
     unsigned long nr_slabs;
-    kmem_slab_alloc_fn_t slab_alloc_fn;
-    kmem_slab_free_fn_t slab_free_fn;
     char name[KMEM_CACHE_NAME_SIZE];
     size_t buftag_dist;     /* Distance from buffer to buftag */
     size_t redzone_pad;     /* Bytes from end of object to redzone word */
diff --git a/kern/task.c b/kern/task.c
index 357fb7f..08f6677 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -62,8 +62,10 @@ ipc_port_t new_task_notification = NULL;
 
 void task_init(void)
 {
-    kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
-                    NULL, NULL, NULL, 0);
+    kmem_cache_init (&task_cache,
+                     "task",
+                     sizeof(struct task), 0,
+                     NULL, 0);
 
     eml_init();
     machine_task_module_init ();
diff --git a/kern/thread.c b/kern/thread.c
index 8676132..9c82af5 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -223,7 +223,7 @@ void stack_privilege(
 void thread_init(void)
 {
     kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
-                    NULL, NULL, NULL, 0);
+                    NULL, 0);
 
     /*
      * Kernel stacks should be naturally aligned, so that it
@@ -232,7 +232,7 @@ void thread_init(void)
      * is easy to find the starting/ending addresses of a
      * stack given an address in the middle.
      */
     kmem_cache_init(&stack_cache, "stack",
                     KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
-                    NULL, NULL, NULL, 0);
+                    NULL, 0);
 
     /*
      * Fill in a template thread for fast initialization.