diff options
author | Richard Braun <rbraun@sceen.net> | 2014-12-10 21:52:40 +0100 |
---|---|---|
committer | Justus Winter <4winter@informatik.uni-hamburg.de> | 2015-09-14 14:45:06 +0200 |
commit | 93833bb0fe0d1e87112a944d0ae0288ba695fa51 (patch) | |
tree | 77881ee1b3c1f01fc1b51d566ca30655e7d8d378 /vm/vm_map.c | |
parent | ee90a004a1a20a9c12dd45b896bfe8ffff2746a0 (diff) |
kern/slab: directmap update
The main impact of the direct physical mapping on the kmem module is the
slab size computation. The page allocator requires the allocation size
to be a power-of-two above the page size since it uses the buddy memory
allocation algorithm.
Custom slab allocation functions are no longer needed since the only
user was the kentry area, which has been removed recently.
The KMEM_CACHE_NOCPUPOOL flag is also no longer needed since CPU pools,
which are allocated from a kmem cache, can now always be allocated out
of the direct physical mapping.
Diffstat (limited to 'vm/vm_map.c')
-rw-r--r-- | vm/vm_map.c | 47 |
1 file changed, 15 insertions(+), 32 deletions(-)
diff --git a/vm/vm_map.c b/vm/vm_map.c index 0c888b6..1b331d6 100644 --- a/vm/vm_map.c +++ b/vm/vm_map.c @@ -152,44 +152,27 @@ vm_object_t vm_submap_object = &vm_submap_object_store; * vm_map_cache: used to allocate maps. * vm_map_entry_cache: used to allocate map entries. * vm_map_kentry_cache: used to allocate map entries for the kernel. - * - * Kernel map entries are allocated from a special cache, using a custom - * page allocation function to avoid recursion. It would be difficult - * (perhaps impossible) for the kernel to allocate more memory to an entry - * cache when it became empty since the very act of allocating memory - * implies the creation of a new entry. */ -vm_offset_t kentry_data; -vm_size_t kentry_data_size = KENTRY_DATA_SIZE; - -static vm_offset_t kentry_pagealloc(vm_size_t size) -{ - vm_offset_t result; - - if (size > kentry_data_size) - panic("vm_map: kentry memory exhausted"); - - result = kentry_data; - kentry_data += size; - kentry_data_size -= size; - return result; -} - void vm_map_init(void) { - kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0, - NULL, NULL, NULL, 0); - kmem_cache_init(&vm_map_entry_cache, "vm_map_entry", - sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0); -#if 0 + kmem_cache_init (&vm_map_cache, + "vm_map", + sizeof(struct vm_map), 0, + NULL, 0); + kmem_cache_init (&vm_map_entry_cache, + "vm_map_entry", + sizeof(struct vm_map_entry), 0, + NULL, 0); +#if 1 kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry", - sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc, - NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB - | KMEM_CACHE_NORECLAIM); + sizeof(struct vm_map_entry), 0, + NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB); #endif - kmem_cache_init(&vm_map_copy_cache, "vm_map_copy", - sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0); + kmem_cache_init (&vm_map_copy_cache, + "vm_map_copy", + sizeof(struct vm_map_copy), 0, + NULL, 0); /* * Submap object is initialized by vm_object_init. |