| author | Richard Braun <rbraun@sceen.net> | 2014-12-10 21:52:40 +0100 |
|---|---|---|
| committer | Justus Winter <4winter@informatik.uni-hamburg.de> | 2015-08-15 12:43:27 +0200 |
| commit | f070e81f33c7f01a70923782b5358d014712d9ef (patch) | |
| tree | 8ceb5b8852c111558d394b002d71fc34bc421996 /vm | |
| parent | 0b0d06c34589595cec2bea6f9b55b8e9ae51c3cf (diff) | |
kern/slab: directmap update
The main impact of the direct physical mapping on the kmem module is the
slab size computation. The page allocator requires the allocation size to
be a power of two no smaller than the page size, since it uses the buddy
memory allocation algorithm.
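In practice this means a slab must occupy a power-of-two number of bytes,
rounded up from whatever the object layout needs. A minimal sketch of that
rounding, for illustration only (the function name, PAGE_SIZE value and the
exact policy are assumptions, not the code from kern/slab.c):

```c
#define PAGE_SIZE 4096UL  /* assumed page size for this sketch */

/*
 * Round a requested slab size up to the next power of two, and to no less
 * than one page, mirroring the constraint that a buddy page allocator can
 * only hand out power-of-two blocks.
 */
static unsigned long
slab_size_round(unsigned long size)
{
    unsigned long rounded = PAGE_SIZE;

    while (rounded < size)
        rounded <<= 1;

    return rounded;
}
```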
Custom slab allocation functions are no longer needed: their only user
was the kentry area, which was recently removed.
The KMEM_CACHE_NOCPUPOOL flag is also no longer needed since CPU pools,
which are allocated from a kmem cache, can now always be allocated out
of the direct physical mapping.
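The visible effect throughout the diff below is that every kmem_cache_init()
call drops its custom slab allocation hooks and keeps only a constructor slot
and the flags word. A representative before/after, paraphrased from the
vm_object.c hunk; the roles of the NULL arguments are inferred from the call
sites rather than from kern/slab.h, so treat them as assumptions:

```c
/* Before: object size, alignment, constructor, custom slab alloc/free
 * hooks, flags -- the hooks were NULL everywhere except the kentry cache. */
kmem_cache_init(&vm_object_cache, "vm_object",
                sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);

/* After: slabs always come from the direct physical mapping, so only the
 * constructor slot and the flags remain. */
kmem_cache_init(&vm_object_cache, "vm_object",
                sizeof(struct vm_object), 0, NULL, 0);
```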
Diffstat (limited to 'vm')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | vm/memory_object_proxy.c | 6 |
| -rw-r--r-- | vm/vm_external.c | 24 |
| -rw-r--r-- | vm/vm_fault.c | 6 |
| -rw-r--r-- | vm/vm_init.c | 2 |
| -rw-r--r-- | vm/vm_map.c | 47 |
| -rw-r--r-- | vm/vm_object.c | 6 |
| -rw-r--r-- | vm/vm_resident.c | 6 |
7 files changed, 45 insertions, 52 deletions
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index a64bfcc..6346b5b 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -63,8 +63,10 @@ typedef struct memory_object_proxy *memory_object_proxy_t;
 void
 memory_object_proxy_init (void)
 {
-  kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
-                   sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
+  kmem_cache_init (&memory_object_proxy_cache,
+                   "memory_object_proxy",
+                   sizeof(struct memory_object_proxy), 0,
+                   NULL, 0);
 }
 
 /* Lookup a proxy memory object by its port. */
diff --git a/vm/vm_external.c b/vm/vm_external.c
index 2e2593b..03ebcfb 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -137,14 +137,18 @@ void vm_external_module_initialize(void)
 {
         vm_size_t size = (vm_size_t) sizeof(struct vm_external);
 
-        kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
-                        NULL, NULL, NULL, 0);
-
-        kmem_cache_init(&vm_object_small_existence_map_cache,
-                        "small_existence_map", SMALL_SIZE, 0,
-                        NULL, NULL, NULL, 0);
-
-        kmem_cache_init(&vm_object_large_existence_map_cache,
-                        "large_existence_map", LARGE_SIZE, 0,
-                        NULL, NULL, NULL, 0);
+        kmem_cache_init (&vm_external_cache,
+                         "vm_external",
+                         size, 0,
+                         NULL, 0);
+
+        kmem_cache_init (&vm_object_small_existence_map_cache,
+                         "small_existence_map",
+                         SMALL_SIZE, 0,
+                         NULL, 0);
+
+        kmem_cache_init (&vm_object_large_existence_map_cache,
+                         "large_existence_map",
+                         LARGE_SIZE, 0,
+                         NULL, 0);
 }
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 46779f6..aa5febc 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -104,8 +104,10 @@ extern struct db_watchpoint *db_watchpoint_list;
  */
 void vm_fault_init(void)
 {
-        kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
-                        sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
+        kmem_cache_init (&vm_fault_state_cache,
+                         "vm_fault_state",
+                         sizeof(vm_fault_state_t), 0,
+                         NULL, 0);
 }
 
 /*
diff --git a/vm/vm_init.c b/vm/vm_init.c
index 3d1081c..4fdcd83 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -36,7 +36,6 @@
 
 #include <mach/machine/vm_types.h>
 #include <kern/slab.h>
-#include <kern/kalloc.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_object.h>
 #include <vm/vm_map.h>
@@ -73,7 +72,6 @@ void vm_mem_bootstrap(void)
         kmem_init(start, end);
         pmap_init();
         slab_init();
-        kalloc_init();
         vm_fault_init();
         vm_page_module_init();
         memory_manager_default_init();
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 4977bff..a521476 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -152,44 +152,27 @@ vm_object_t vm_submap_object = &vm_submap_object_store;
  *	vm_map_cache:		used to allocate maps.
  *	vm_map_entry_cache:	used to allocate map entries.
  *	vm_map_kentry_cache:	used to allocate map entries for the kernel.
- *
- *	Kernel map entries are allocated from a special cache, using a custom
- *	page allocation function to avoid recursion. It would be difficult
- *	(perhaps impossible) for the kernel to allocate more memory to an entry
- *	cache when it became empty since the very act of allocating memory
- *	implies the creation of a new entry.
  */
-vm_offset_t kentry_data;
-vm_size_t kentry_data_size = KENTRY_DATA_SIZE;
-
-static vm_offset_t kentry_pagealloc(vm_size_t size)
-{
-        vm_offset_t result;
-
-        if (size > kentry_data_size)
-                panic("vm_map: kentry memory exhausted");
-
-        result = kentry_data;
-        kentry_data += size;
-        kentry_data_size -= size;
-        return result;
-}
-
 void vm_map_init(void)
 {
-        kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
-                        NULL, NULL, NULL, 0);
-        kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
-                        sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
-#if 0
+        kmem_cache_init (&vm_map_cache,
+                         "vm_map",
+                         sizeof(struct vm_map), 0,
+                         NULL, 0);
+        kmem_cache_init (&vm_map_entry_cache,
+                         "vm_map_entry",
+                         sizeof(struct vm_map_entry), 0,
+                         NULL, 0);
+#if 1
         kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
-                        sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
-                        NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
-                        | KMEM_CACHE_NORECLAIM);
+                        sizeof(struct vm_map_entry), 0,
+                        NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB);
 #endif
-        kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
-                        sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
+        kmem_cache_init (&vm_map_copy_cache,
+                         "vm_map_copy",
+                         sizeof(struct vm_map_copy), 0,
+                         NULL, 0);
 
         /*
          * Submap object is initialized by vm_object_init.
diff --git a/vm/vm_object.c b/vm/vm_object.c
index b403d30..ee09e3b 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -259,8 +259,10 @@ vm_object_t vm_object_allocate(
  */
 void vm_object_bootstrap(void)
 {
-        kmem_cache_init(&vm_object_cache, "vm_object",
-                        sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
+        kmem_cache_init (&vm_object_cache,
+                         "vm_object",
+                         sizeof(struct vm_object), 0,
+                         NULL, 0);
 
         queue_init(&vm_object_cached_list);
         vm_object_cache_lock_init();
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index c70fa73..d3b5a8e 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -429,8 +429,10 @@ void pmap_startup(
  */
 void vm_page_module_init(void)
 {
-        kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
-                        NULL, NULL, NULL, 0);
+        kmem_cache_init (&vm_page_cache,
+                         "vm_page",
+                         sizeof(struct vm_page), 0,
+                         NULL, 0);
 }
 
 /*