-rw-r--r--   device/dev_lookup.c       |   6
-rw-r--r--   device/dev_pager.c        |  12
-rw-r--r--   device/ds_routines.c      |  12
-rw-r--r--   device/net_io.c           |  12
-rw-r--r--   i386/i386/fpu.c           |   7
-rw-r--r--   i386/i386/machine_task.c  |   6
-rw-r--r--   i386/i386/pcb.c           |   6
-rw-r--r--   i386/intel/pmap.c         |  10
-rw-r--r--   ipc/ipc_init.c            |  30
-rw-r--r--   ipc/ipc_marequest.c       |   6
-rw-r--r--   kern/act.c                |   6
-rw-r--r--   kern/kalloc.h             |   2
-rw-r--r--   kern/processor.c          |   6
-rw-r--r--   kern/rdxtree.c            |   6
-rw-r--r--   kern/slab.c               | 173
-rw-r--r--   kern/slab.h               |  21
-rw-r--r--   kern/slab_i.h             |   5
-rw-r--r--   kern/task.c               |   6
-rw-r--r--   kern/thread.c             |   4
-rw-r--r--   vm/memory_object_proxy.c  |   6
-rw-r--r--   vm/vm_external.c          |  24
-rw-r--r--   vm/vm_fault.c             |   6
-rw-r--r--   vm/vm_init.c              |   2
-rw-r--r--   vm/vm_map.c               |  47
-rw-r--r--   vm/vm_object.c            |   6
-rw-r--r--   vm/vm_resident.c          |   6
26 files changed, 193 insertions(+), 240 deletions(-)
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index 297dcde..f6fc0d4 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -365,6 +365,8 @@ dev_lookup_init(void)
 	for (i = 0; i < NDEVHASH; i++)
 	    queue_init(&dev_number_hash_table[i]);
 
-	kmem_cache_init(&dev_hdr_cache, "mach_device",
-			sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&dev_hdr_cache,
+			 "mach_device",
+			 sizeof(struct mach_device), 0,
+			 NULL, 0);
 }
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 815473a..0ffcdf5 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -172,8 +172,10 @@ void dev_pager_hash_init(void)
 	vm_size_t size;
 
 	size = sizeof(struct dev_pager_entry);
-	kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&dev_pager_hash_cache,
+			 "dev_pager_entry",
+			 size, 0,
+			 NULL, 0);
 	for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
 	    queue_init(&dev_pager_hashtable[i]);
 	simple_lock_init(&dev_pager_hash_lock);
@@ -704,8 +706,10 @@ void device_pager_init(void)
 	 * Initialize cache of paging structures.
 	 */
 	size = sizeof(struct dev_pager);
-	kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&dev_pager_cache,
+			 "dev_pager",
+			 size, 0,
+			 NULL, 0);
 
 	/*
 	 * Initialize the name port hashing stuff.
diff --git a/device/ds_routines.c b/device/ds_routines.c
index 33cfd89..e3502f7 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -1553,8 +1553,10 @@ void mach_device_init(void)
 	 */
 	device_io_map->wait_for_space = TRUE;
 
-	kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
-			sizeof(io_buf_ptr_inband_t), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&io_inband_cache,
+			 "io_buf_ptr_inband",
+			 sizeof(io_buf_ptr_inband_t), 0,
+			 NULL, 0);
 
 	mach_device_trap_init();
 }
@@ -1597,8 +1599,10 @@ struct kmem_cache io_trap_cache;
 static void
 mach_device_trap_init(void)
 {
-	kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&io_trap_cache,
+			 "io_req",
+			 IOTRAP_REQSIZE, 0,
+			 NULL, 0);
 }
 
 /*
diff --git a/device/net_io.c b/device/net_io.c
index 12a1e9c..67d849a 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -1494,12 +1494,16 @@ net_io_init(void)
 	vm_size_t size;
 
 	size = sizeof(struct net_rcv_port);
-	kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&net_rcv_cache,
+			 "net_rcv_port",
+			 size, 0,
+			 NULL, 0);
 
 	size = sizeof(struct net_hash_entry);
-	kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&net_hash_entry_cache,
+			 "net_hash_entry",
+			 size, 0,
+			 NULL, 0);
 
 	size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
 	net_kmsg_size = round_page(size);
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
index 0f34833..476e1de 100644
--- a/i386/i386/fpu.c
+++ b/i386/i386/fpu.c
@@ -187,9 +187,10 @@ init_fpu(void)
 void
 fpu_module_init(void)
 {
-	kmem_cache_init(&ifps_cache, "i386_fpsave_state",
-			sizeof(struct i386_fpsave_state), 16,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&ifps_cache,
+			 "i386_fpsave_state",
+			 sizeof(struct i386_fpsave_state), 16,
+			 NULL, 0);
 }
 
 /*
diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c
index 490b102..a514086 100644
--- a/i386/i386/machine_task.c
+++ b/i386/i386/machine_task.c
@@ -37,8 +37,10 @@ struct kmem_cache machine_task_iopb_cache;
 void
 machine_task_module_init (void)
 {
-  kmem_cache_init (&machine_task_iopb_cache, "i386_task_iopb", IOPB_BYTES, 0,
-		   NULL, NULL, NULL, 0);
+  kmem_cache_init (&machine_task_iopb_cache,
+		   "i386_task_iopb",
+		   IOPB_BYTES, 0,
+		   NULL, 0);
 }
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
index 3a0eba0..4199f78 100644
--- a/i386/i386/pcb.c
+++ b/i386/i386/pcb.c
@@ -370,8 +370,10 @@ thread_t switch_context(
 void pcb_module_init(void)
 {
-	kmem_cache_init(&pcb_cache, "pcb", sizeof(struct pcb), 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&pcb_cache,
+			 "pcb",
+			 sizeof(struct pcb), 0,
+			 NULL, 0);
 
 	fpu_module_init();
 }
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index cf7a736..3978303 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -977,9 +977,15 @@ void pmap_init(void)
 	 * and of the physical-to-virtual entries.
 	 */
 	s = (vm_size_t) sizeof(struct pmap);
-	kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&pmap_cache,
+			 "pmap",
+			 s, 0,
+			 NULL, 0);
 	s = (vm_size_t) sizeof(struct pv_entry);
-	kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&pv_list_cache,
+			 "pv_entry",
+			 s, 0,
+			 NULL, 0);
 
 #if NCPUS > 1
 	/*
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
index 2c58a6e..b7483d4 100644
--- a/ipc/ipc_init.c
+++ b/ipc/ipc_init.c
@@ -72,17 +72,25 @@ ipc_bootstrap(void)
 	ipc_port_timestamp_lock_init();
 	ipc_port_timestamp_data = 0;
 
-	kmem_cache_init(&ipc_space_cache, "ipc_space",
-			sizeof(struct ipc_space), 0, NULL, NULL, NULL, 0);
-
-	kmem_cache_init(&ipc_entry_cache, "ipc_entry",
-			sizeof(struct ipc_entry), 0, NULL, NULL, NULL, 0);
-
-	kmem_cache_init(&ipc_object_caches[IOT_PORT], "ipc_port",
-			sizeof(struct ipc_port), 0, NULL, NULL, NULL, 0);
-
-	kmem_cache_init(&ipc_object_caches[IOT_PORT_SET], "ipc_pset",
-			sizeof(struct ipc_pset), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&ipc_space_cache,
+			 "ipc_space",
+			 sizeof(struct ipc_space), 0,
+			 NULL, 0);
+
+	kmem_cache_init (&ipc_entry_cache,
+			 "ipc_entry",
+			 sizeof(struct ipc_entry), 0,
+			 NULL, 0);
+
+	kmem_cache_init (&ipc_object_caches[IOT_PORT],
+			 "ipc_port",
+			 sizeof(struct ipc_port), 0,
+			 NULL, 0);
+
+	kmem_cache_init (&ipc_object_caches[IOT_PORT_SET],
+			 "ipc_pset",
+			 sizeof(struct ipc_pset), 0,
+			 NULL, 0);
 
 	/* create special spaces */
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
index ded1711..ab4d8fe 100644
--- a/ipc/ipc_marequest.c
+++ b/ipc/ipc_marequest.c
@@ -136,8 +136,10 @@ ipc_marequest_init(void)
 		bucket->imarb_head = IMAR_NULL;
 	}
 
-	kmem_cache_init(&ipc_marequest_cache, "ipc_marequest",
-			sizeof(struct ipc_marequest), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&ipc_marequest_cache,
+			 "ipc_marequest",
+			 sizeof(struct ipc_marequest), 0,
+			 NULL, 0);
 }
 
 /*
diff --git a/kern/act.c b/kern/act.c
--- a/kern/act.c
+++ b/kern/act.c
@@ -67,8 +67,10 @@ void global_act_init(void)
 {
 #ifndef ACT_STATIC_KLUDGE
-	kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&act_cache,
+			 "Act",
+			 sizeof(struct Act), 0,
+			 NULL, 0);
 #else
 	int i;
diff --git a/kern/kalloc.h b/kern/kalloc.h
index 004e3a6..c378873 100644
--- a/kern/kalloc.h
+++ b/kern/kalloc.h
@@ -33,6 +33,4 @@
 extern vm_offset_t kalloc (vm_size_t size);
 extern void kfree (vm_offset_t data, vm_size_t size);
 
-extern void kalloc_init (void);
-
 #endif /* _KERN_KALLOC_H_ */
diff --git a/kern/processor.c b/kern/processor.c
index bc70dde..f2f9a2d 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -108,8 +108,10 @@ void pset_sys_init(void)
 	/*
 	 * Allocate the cache for processor sets.
 	 */
-	kmem_cache_init(&pset_cache, "processor_set",
-			sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&pset_cache,
+			 "processor_set",
+			 sizeof(struct processor_set), 0,
+			 NULL, 0);
 
 	/*
 	 * Give each processor a control port.
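All of the call sites above move from the old eight-argument kmem_cache_init(), which took a constructor plus slab allocation and free hooks, to the six-argument form declared later in this patch in kern/slab.h. A minimal sketch of what a client initialization looks like under the new prototype; the foo names are placeholders, not part of the patch:

    /* Hypothetical client module: create a cache for `struct foo`.
     * Assumes the post-patch prototype from kern/slab.h:
     *   void kmem_cache_init(struct kmem_cache *cache, const char *name,
     *                        size_t obj_size, size_t align,
     *                        kmem_ctor_fn_t ctor, int flags);
     */
    #include <kern/slab.h>

    static struct kmem_cache foo_cache;

    void foo_module_init(void)
    {
        kmem_cache_init(&foo_cache, "foo",
                        sizeof(struct foo), 0,
                        NULL, 0);   /* no constructor, default flags */
    }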
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
index 78868b1..83ba750 100644
--- a/kern/rdxtree.c
+++ b/kern/rdxtree.c
@@ -123,8 +123,10 @@ static struct kmem_cache rdxtree_node_cache;
 void
 rdxtree_cache_init(void)
 {
-    kmem_cache_init(&rdxtree_node_cache, "rdxtree_node",
-                    sizeof(struct rdxtree_node), 0, NULL, NULL, NULL, 0);
+    kmem_cache_init (&rdxtree_node_cache,
+                     "rdxtree_node",
+                     sizeof(struct rdxtree_node), 0,
+                     NULL, 0);
 }
 
 #ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
diff --git a/kern/slab.c b/kern/slab.c
index ca8b44a..42af6a3 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -53,14 +53,11 @@
  *
  * The per-cache self-scaling hash table for buffer-to-bufctl conversion,
  * described in 3.2.3 "Slab Layout for Large Objects", has been replaced with
- * a constant time buffer-to-slab lookup that relies on the VM system. When
- * slabs are created, their backing virtual pages are mapped to physical pages
- * that are never aliased by other virtual addresses (unless explicitely by
- * other kernel code). The lookup operation provides the associated physical
- * page descriptor which can store data private to this allocator. The main
- * drawback of this method is that it needs to walk the low level page tables,
- * but it's expected that these have already been cached by the CPU due to
- * prior access by the allocator user, depending on the hardware properties.
+ * a constant time buffer-to-slab lookup that relies on the VM system. Slabs
+ * are allocated from the direct mapping of physical memory, which enables
+ * the retrieval of physical addresses backing slabs with a simple shift.
+ * Physical addresses are then used to find page descriptors, which store
+ * data private to this allocator.
  *
  * This implementation uses per-cpu pools of objects, which service most
  * allocation requests. These pools act as caches (but are named differently
@@ -81,6 +78,7 @@
 #include <mach/vm_param.h>
 #include <mach/machine/vm_types.h>
 #include <vm/vm_kern.h>
+#include <vm/vm_page.h>
 #include <vm/vm_types.h>
 
 #include <sys/types.h>
@@ -135,9 +133,9 @@
 #define KMEM_CPU_POOL_TRANSFER_RATIO 2
 
 /*
- * Size of the VM submap from which default backend functions allocate.
+ * Logarithm of the size of the smallest general cache.
 */
-#define KMEM_MAP_SIZE (96 * 1024 * 1024)
+#define KMEM_CACHES_FIRST_ORDER 5
 
 /*
 * Shift for the first kalloc cache size.
@@ -204,12 +202,6 @@ static unsigned int kmem_nr_caches;
 static simple_lock_data_t __attribute__((used)) kmem_cache_list_lock;
 
 /*
- * VM submap for slab caches.
- */
-static struct vm_map kmem_map_store;
-vm_map_t kmem_map = &kmem_map_store;
-
-/*
 * Time of the last memory reclaim, in clock ticks.
 */
 static unsigned long kmem_gc_last_tick;
@@ -310,30 +302,12 @@ static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
     return (void *)bufctl - cache->bufctl_dist;
 }
 
-static vm_offset_t kmem_pagealloc(vm_size_t size)
-{
-    vm_offset_t addr;
-    kern_return_t kr;
-
-    kr = kmem_alloc_wired(kmem_map, &addr, size);
-
-    if (kr != KERN_SUCCESS)
-        return 0;
-
-    return addr;
-}
-
-static void kmem_pagefree(vm_offset_t ptr, vm_size_t size)
-{
-    kmem_free(kmem_map, ptr, size);
-}
-
 static void kmem_slab_create_verify(struct kmem_slab *slab,
                                     struct kmem_cache *cache)
 {
     struct kmem_buftag *buftag;
-    size_t buf_size;
     unsigned long buffers;
+    size_t buf_size;
     void *buf;
 
     buf_size = cache->buf_size;
@@ -356,29 +330,26 @@ static void kmem_slab_create_verify(struct kmem_slab *slab,
 static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
                                            size_t color)
 {
+    struct vm_page *page;
     struct kmem_slab *slab;
     union kmem_bufctl *bufctl;
     size_t buf_size;
     unsigned long buffers;
     void *slab_buf;
 
-    if (cache->slab_alloc_fn == NULL)
-        slab_buf = (void *)kmem_pagealloc(cache->slab_size);
-    else
-        slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size);
+    page = vm_page_alloc_p(cache->slab_order, VM_PAGE_SEL_DIRECTMAP,
+                           VM_PAGE_KMEM);
 
-    if (slab_buf == NULL)
+    if (page == NULL)
         return NULL;
 
+    slab_buf = vm_page_direct_ptr(page);
+
     if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
         slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
 
         if (slab == NULL) {
-            if (cache->slab_free_fn == NULL)
-                kmem_pagefree((vm_offset_t)slab_buf, cache->slab_size);
-            else
-                cache->slab_free_fn((vm_offset_t)slab_buf, cache->slab_size);
-
+            vm_page_free_p(page, cache->slab_order);
             return NULL;
         }
     } else {
@@ -438,21 +409,12 @@ static void kmem_slab_destroy_verify(struct kmem_slab *slab,
  */
 static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
 {
-    vm_offset_t slab_buf;
-
     assert(slab->nr_refs == 0);
     assert(slab->first_free != NULL);
 
     if (cache->flags & KMEM_CF_VERIFY)
         kmem_slab_destroy_verify(slab, cache);
 
-    slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
-
-    if (cache->slab_free_fn == NULL)
-        kmem_pagefree(slab_buf, cache->slab_size);
-    else
-        cache->slab_free_fn(slab_buf, cache->slab_size);
-
     if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
         kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
 }
@@ -473,7 +435,7 @@ kmem_slab_vmref(struct kmem_slab *slab, size_t size)
     end = va + size;
 
     do {
-        page = vm_kmem_lookup_page(va);
+        page = vm_page_lookup_pa(vm_page_direct_pa(va));
         assert(page != NULL);
         assert(page->slab_priv == NULL);
         page->slab_priv = slab;
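The hunk above is where the direct-mapping scheme from the new header comment does its bookkeeping: kmem_slab_vmref() records the owning slab in the page descriptor of every page backing the slab. A minimal sketch of the reverse, constant-time lookup, using only calls that appear in this patch; the helper name is hypothetical, and kmem_cache_free_to_slab() below open-codes the same two calls:

    /* Sketch only: how a buffer address comes back to its slab under the
     * direct-mapping scheme.  kmem_buf_to_slab() is a hypothetical helper. */
    static struct kmem_slab *
    kmem_buf_to_slab(const void *buf)
    {
        struct vm_page *page;

        /* Direct-mapped VA to PA is a constant-time address translation,
         * then the PA selects the page descriptor. */
        page = vm_page_lookup_pa(vm_page_direct_pa((unsigned long)buf));
        assert(page != NULL);

        /* kmem_slab_vmref() stored the owning slab in the descriptor. */
        return page->slab_priv;
    }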
@@ -628,7 +590,8 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 {
     size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size = 0;
     size_t waste, waste_min;
-    int embed, optimal_embed = 0;
+    int embed, optimal_embed = optimal_embed;
+    unsigned int slab_order, optimal_order = optimal_order;
 
     buf_size = cache->buf_size;
@@ -640,7 +603,9 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 
     do {
         i++;
-        slab_size = P2ROUND(i * buf_size, PAGE_SIZE);
+
+        slab_order = vm_page_order(i * buf_size);
+        slab_size = PAGE_SIZE << slab_order;
         free_slab_size = slab_size;
 
         if (flags & KMEM_CACHE_NOOFFSLAB)
@@ -663,6 +628,7 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
 
         if (waste <= waste_min) {
             waste_min = waste;
+            optimal_order = slab_order;
             optimal_size = slab_size;
             optimal_embed = embed;
         }
@@ -672,10 +638,10 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
     assert(optimal_size > 0);
     assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
 
+    cache->slab_order = optimal_order;
     cache->slab_size = optimal_size;
-    slab_size = cache->slab_size - (optimal_embed
-                                    ? sizeof(struct kmem_slab)
-                                    : 0);
+    slab_size = cache->slab_size
+                - (optimal_embed ? sizeof(struct kmem_slab) : 0);
     cache->bufs_per_slab = slab_size / buf_size;
     cache->color_max = slab_size % buf_size;
@@ -690,10 +656,9 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
     }
 }
 
-void kmem_cache_init(struct kmem_cache *cache, const char *name,
-                     size_t obj_size, size_t align, kmem_ctor_fn_t ctor,
-                     kmem_slab_alloc_fn_t slab_alloc_fn,
-                     kmem_slab_free_fn_t slab_free_fn, int flags)
+void
+kmem_cache_init(struct kmem_cache *cache, const char *name, size_t obj_size,
+                size_t align, kmem_ctor_fn_t ctor, int flags)
 {
 #if SLAB_USE_CPU_POOLS
     struct kmem_cpu_pool_type *cpu_pool_type;
@@ -735,8 +700,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
     cache->nr_slabs = 0;
     cache->nr_free_slabs = 0;
     cache->ctor = ctor;
-    cache->slab_alloc_fn = slab_alloc_fn;
-    cache->slab_free_fn = slab_free_fn;
     strncpy(cache->name, name, sizeof(cache->name));
     cache->name[sizeof(cache->name) - 1] = '\0';
     cache->buftag_dist = 0;
@@ -905,7 +868,7 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
     } else {
         struct vm_page *page;
 
-        page = vm_kmem_lookup_page((unsigned long)buf);
+        page = vm_page_lookup_pa(vm_page_direct_pa((unsigned long)buf));
         assert(page != NULL);
         slab = page->slab_priv;
         assert(slab != NULL);
@@ -1194,16 +1157,13 @@ void slab_bootstrap(void)
 
 void slab_init(void)
 {
-    vm_offset_t min, max;
+    size_t i, size;
+    char name[KMEM_CACHE_NAME_SIZE];
 
 #if SLAB_USE_CPU_POOLS
     struct kmem_cpu_pool_type *cpu_pool_type;
-    char name[KMEM_CACHE_NAME_SIZE];
-    size_t i, size;
 #endif /* SLAB_USE_CPU_POOLS */
 
-    kmem_submap(kmem_map, kernel_map, &min, &max, KMEM_MAP_SIZE, FALSE);
-
 #if SLAB_USE_CPU_POOLS
     for (i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
         cpu_pool_type = &kmem_cpu_pool_types[i];
@@ -1211,7 +1171,7 @@ void slab_init(void)
         sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
         size = sizeof(void *) * cpu_pool_type->array_size;
         kmem_cache_init(cpu_pool_type->array_cache, name, size,
-                        cpu_pool_type->array_align, NULL, NULL, NULL, 0);
+                        cpu_pool_type->array_align, NULL, 0);
     }
 #endif /* SLAB_USE_CPU_POOLS */
@@ -1219,56 +1179,21 @@ void slab_init(void)
      * Prevent off slab data for the slab cache to avoid infinite recursion.
      */
     kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
-                    0, NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB);
-}
-
-static vm_offset_t kalloc_pagealloc(vm_size_t size)
-{
-    vm_offset_t addr;
-    kern_return_t kr;
-
-    kr = kmem_alloc_wired(kmem_map, &addr, size);
-
-    if (kr != KERN_SUCCESS)
-        return 0;
-
-    return addr;
-}
-
-static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size)
-{
-    kmem_free(kmem_map, ptr, size);
-}
-
-void kalloc_init(void)
-{
-    char name[KMEM_CACHE_NAME_SIZE];
-    size_t i, size;
+                    0, NULL, KMEM_CACHE_NOOFFSLAB);
 
-    size = 1 << KALLOC_FIRST_SHIFT;
+    size = 1 << KMEM_CACHES_FIRST_ORDER;
 
     for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
-        sprintf(name, "kalloc_%lu", size);
-        kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL,
-                        kalloc_pagealloc, kalloc_pagefree, 0);
+        sprintf(name, "kmem_%zu", size);
+        kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL, 0);
         size <<= 1;
     }
 }
 
-/*
- * Return the kalloc cache index matching the given allocation size, which
- * must be strictly greater than 0.
- */
-static inline size_t kalloc_get_index(unsigned long size)
+static inline size_t
+kalloc_get_index(unsigned long size)
 {
-    assert(size != 0);
-
-    size = (size - 1) >> KALLOC_FIRST_SHIFT;
-
-    if (size == 0)
-        return 0;
-    else
-        return (sizeof(long) * 8) - __builtin_clzl(size);
+    return iorder2(size) - KMEM_CACHES_FIRST_ORDER;
 }
 
 static void kalloc_verify(struct kmem_cache *cache, void *buf, size_t size)
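With the general caches now sized in powers of two starting at 1 << KMEM_CACHES_FIRST_ORDER (32 bytes, named kmem_32, kmem_64, and so on by the sprintf loop above), the cache index is simply the binary order of the request minus that first order. A small, self-contained illustration, assuming iorder2(), which is not defined in this patch, returns the ceiling of log2:

    /* Illustration only: the size-to-cache mapping implied by
     *   kalloc_get_index(size) = iorder2(size) - KMEM_CACHES_FIRST_ORDER
     * under the assumption that iorder2() is ceil(log2(size)). */
    #include <assert.h>
    #include <limits.h>

    static unsigned int ceil_log2(unsigned long size)   /* stand-in for iorder2() */
    {
        return (size <= 1) ? 0
             : (sizeof(long) * CHAR_BIT) - __builtin_clzl(size - 1);
    }

    int main(void)
    {
        /* 33..64-byte requests fall in kmem_64 (index 1),
         * 65..128-byte requests in kmem_128 (index 2), and so on. */
        assert(ceil_log2(64)  - 5 == 1);
        assert(ceil_log2(65)  - 5 == 2);
        assert(ceil_log2(128) - 5 == 2);
        return 0;
    }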
@@ -1301,8 +1226,17 @@ vm_offset_t kalloc(vm_size_t size)
 
         if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY))
             kalloc_verify(cache, buf, size);
-    } else
-        buf = (void *)kalloc_pagealloc(size);
+    } else {
+        struct vm_page *page;
+
+        page = vm_page_alloc_p(vm_page_order(size), VM_PAGE_SEL_DIRECTMAP,
+                               VM_PAGE_KERNEL);
+
+        if (page == NULL)
+            return (vm_offset_t) 0;
+
+        buf = vm_page_direct_ptr(page);
+    }
 
     return (vm_offset_t)buf;
 }
@@ -1343,7 +1277,10 @@ void kfree(vm_offset_t data, vm_size_t size)
 
         kmem_cache_free(cache, data);
     } else {
-        kalloc_pagefree(data, size);
+        struct vm_page *page;
+
+        page = vm_page_lookup_pa(vm_page_direct_pa((unsigned long)data));
+        vm_page_free_p(page, vm_page_order(size));
     }
 }
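Allocations too large for the biggest general cache now come straight from the physical page allocator rather than from the removed kmem_map submap, and kfree() recovers the page descriptor through the direct mapping. A short usage sketch from the caller's side; the buffer and its size are arbitrary illustrations, with the size assumed to exceed the largest kmem_* cache:

    /* Usage sketch: a large allocation must be freed with its original
     * size, since kfree() derives the page order from that size. */
    void example_large_buffer(void)
    {
        vm_offset_t buf;
        vm_size_t size = 64 * 1024;    /* assumed larger than any kmem_* cache */

        buf = kalloc(size);

        if (buf == 0)
            return;                    /* allocation failure */

        /* ... use the buffer ... */

        kfree(buf, size);              /* same size as the allocation */
    }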
diff --git a/kern/slab.h b/kern/slab.h
index 52aa11b..6f5cc18 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 Free Software Foundation.
+ * Copyright (c) 2011-2015 Free Software Foundation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -69,14 +69,6 @@ struct kmem_cache;
  */
 typedef void (*kmem_ctor_fn_t)(void *);
 
-/*
- * Types for slab allocation/free functions.
- *
- * All addresses and sizes must be page-aligned.
- */
-typedef vm_offset_t (*kmem_slab_alloc_fn_t)(vm_size_t);
-typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
-
 #include <kern/slab_i.h>
 
 /*
@@ -86,11 +78,6 @@ typedef struct kmem_cache *kmem_cache_t;
 #define KMEM_CACHE_NULL ((kmem_cache_t) 0)
 
 /*
- * VM submap for slab allocations.
- */
-extern vm_map_t kmem_map;
-
-/*
  * Cache initialization flags.
  */
 #define KMEM_CACHE_NOCPUPOOL 0x1 /* Don't use the per-cpu pools */
@@ -99,14 +86,10 @@ extern vm_map_t kmem_map;
 
 /*
  * Initialize a cache.
- *
- * If a slab allocation/free function pointer is NULL, the default backend
- * (vm_kmem on the kernel map) is used for the allocation/free action.
  */
 void kmem_cache_init(struct kmem_cache *cache, const char *name,
                      size_t obj_size, size_t align, kmem_ctor_fn_t ctor,
-                     kmem_slab_alloc_fn_t slab_alloc_fn,
-                     kmem_slab_free_fn_t slab_free_fn, int flags);
+                     int flags);
 
 /*
  * Allocate an object from a cache.
diff --git a/kern/slab_i.h b/kern/slab_i.h
index 06da0ea..f8dc901 100644
--- a/kern/slab_i.h
+++ b/kern/slab_i.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2011, 2012, 2013 Richard Braun.
+ * Copyright (c) 2010-2014 Richard Braun.
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -197,6 +197,7 @@ struct kmem_cache {
     int flags;
     size_t bufctl_dist;     /* Distance from buffer to bufctl */
     size_t slab_size;
+    unsigned int slab_order;
     unsigned long bufs_per_slab;
     unsigned long nr_objs;  /* Number of allocated objects */
     unsigned long nr_free_slabs;
@@ -210,8 +211,6 @@ struct kmem_cache {
     size_t color_max;
     unsigned long nr_bufs;  /* Total number of buffers */
     unsigned long nr_slabs;
-    kmem_slab_alloc_fn_t slab_alloc_fn;
-    kmem_slab_free_fn_t slab_free_fn;
     char name[KMEM_CACHE_NAME_SIZE];
     size_t buftag_dist;     /* Distance from buffer to buftag */
     size_t redzone_pad;     /* Bytes from end of object to redzone word */
diff --git a/kern/task.c b/kern/task.c
index 357fb7f..08f6677 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -62,8 +62,10 @@ ipc_port_t new_task_notification = NULL;
 void task_init(void)
 {
-	kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&task_cache,
+			 "task",
+			 sizeof(struct task), 0,
+			 NULL, 0);
 
 	eml_init();
 	machine_task_module_init ();
diff --git a/kern/thread.c b/kern/thread.c
index 8676132..9c82af5 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -223,7 +223,7 @@ void stack_privilege(
 void thread_init(void)
 {
 	kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
-			NULL, NULL, NULL, 0);
+			NULL, 0);
 
 	/*
 	 * Kernel stacks should be naturally aligned, so that it
@@ -232,7 +232,7 @@ void thread_init(void)
 	 */
 	kmem_cache_init(&stack_cache, "stack",
 			KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
-			NULL, NULL, NULL, 0);
+			NULL, 0);
 
 	/*
 	 * Fill in a template thread for fast initialization.
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index a64bfcc..6346b5b 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -63,8 +63,10 @@ typedef struct memory_object_proxy *memory_object_proxy_t;
 void
 memory_object_proxy_init (void)
 {
-  kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
-		   sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
+  kmem_cache_init (&memory_object_proxy_cache,
+		   "memory_object_proxy",
+		   sizeof(struct memory_object_proxy), 0,
+		   NULL, 0);
 }
 
 /* Lookup a proxy memory object by its port.
  */
diff --git a/vm/vm_external.c b/vm/vm_external.c
index 2e2593b..03ebcfb 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -137,14 +137,18 @@ void vm_external_module_initialize(void)
 {
 	vm_size_t size = (vm_size_t) sizeof(struct vm_external);
 
-	kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
-			NULL, NULL, NULL, 0);
-
-	kmem_cache_init(&vm_object_small_existence_map_cache,
-			"small_existence_map", SMALL_SIZE, 0,
-			NULL, NULL, NULL, 0);
-
-	kmem_cache_init(&vm_object_large_existence_map_cache,
-			"large_existence_map", LARGE_SIZE, 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&vm_external_cache,
+			 "vm_external",
+			 size, 0,
+			 NULL, 0);
+
+	kmem_cache_init (&vm_object_small_existence_map_cache,
+			 "small_existence_map",
+			 SMALL_SIZE, 0,
+			 NULL, 0);
+
+	kmem_cache_init (&vm_object_large_existence_map_cache,
+			 "large_existence_map",
+			 LARGE_SIZE, 0,
+			 NULL, 0);
 }
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 1ed9e03..2fe74a4 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -104,8 +104,10 @@ extern struct db_watchpoint *db_watchpoint_list;
  */
 void vm_fault_init(void)
 {
-	kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
-			sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&vm_fault_state_cache,
+			 "vm_fault_state",
+			 sizeof(vm_fault_state_t), 0,
+			 NULL, 0);
 }
 
 /*
diff --git a/vm/vm_init.c b/vm/vm_init.c
index 3d1081c..4fdcd83 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -36,7 +36,6 @@
 #include <mach/machine/vm_types.h>
 #include <kern/slab.h>
-#include <kern/kalloc.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_object.h>
 #include <vm/vm_map.h>
@@ -73,7 +72,6 @@ void vm_mem_bootstrap(void)
 	kmem_init(start, end);
 	pmap_init();
 	slab_init();
-	kalloc_init();
 	vm_fault_init();
 	vm_page_module_init();
 	memory_manager_default_init();
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 0c888b6..1b331d6 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -152,44 +152,27 @@ vm_object_t vm_submap_object = &vm_submap_object_store;
  *	vm_map_cache:		used to allocate maps.
  *	vm_map_entry_cache:	used to allocate map entries.
  *	vm_map_kentry_cache:	used to allocate map entries for the kernel.
- *
- *	Kernel map entries are allocated from a special cache, using a custom
- *	page allocation function to avoid recursion.  It would be difficult
- *	(perhaps impossible) for the kernel to allocate more memory to an entry
- *	cache when it became empty since the very act of allocating memory
- *	implies the creation of a new entry.
  */
 
-vm_offset_t	kentry_data;
-vm_size_t	kentry_data_size = KENTRY_DATA_SIZE;
-
-static vm_offset_t kentry_pagealloc(vm_size_t size)
-{
-	vm_offset_t result;
-
-	if (size > kentry_data_size)
-		panic("vm_map: kentry memory exhausted");
-
-	result = kentry_data;
-	kentry_data += size;
-	kentry_data_size -= size;
-	return result;
-}
-
 void vm_map_init(void)
 {
-	kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
-			NULL, NULL, NULL, 0);
-	kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
-			sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
-#if 0
+	kmem_cache_init (&vm_map_cache,
+			 "vm_map",
+			 sizeof(struct vm_map), 0,
+			 NULL, 0);
+	kmem_cache_init (&vm_map_entry_cache,
+			 "vm_map_entry",
+			 sizeof(struct vm_map_entry), 0,
+			 NULL, 0);
+#if 1
 	kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
-			sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
-			NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
-			| KMEM_CACHE_NORECLAIM);
+			sizeof(struct vm_map_entry), 0,
+			NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB);
 #endif
-	kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
-			sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&vm_map_copy_cache,
+			 "vm_map_copy",
+			 sizeof(struct vm_map_copy), 0,
+			 NULL, 0);
 
 	/*
 	 *	Submap object is initialized by vm_object_init.
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 836f5ef..91d5db8 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -266,8 +266,10 @@ vm_object_t vm_object_allocate(
  */
 void vm_object_bootstrap(void)
 {
-	kmem_cache_init(&vm_object_cache, "vm_object",
-			sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init (&vm_object_cache,
+			 "vm_object",
+			 sizeof(struct vm_object), 0,
+			 NULL, 0);
 
 	queue_init(&vm_object_cached_list);
 	vm_object_cache_lock_init();
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 4e1eed5..c9fc8c1 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -429,8 +429,10 @@ void pmap_startup(
  */
 void vm_page_module_init(void)
 {
-	kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
-			NULL, NULL, NULL, 0);
+	kmem_cache_init (&vm_page_cache,
+			 "vm_page",
+			 sizeof(struct vm_page), 0,
+			 NULL, 0);
 }
 
 /*