author    Richard Braun <rbraun@sceen.net>  2011-12-17 16:23:33 +0000
committer Richard Braun <rbraun@sceen.net>  2011-12-17 22:12:45 +0000
commit    7fa0de680bbfed02e09ad517788fe336c0d855cc (patch)
tree      83cd618d51f770f7fbf452cbe72c0f349068eefd /vm/vm_map.c
parent    db2078e4f1802434f791f4f1c333725c42fe172b (diff)
Adjust VM initialization
Unlike the zone allocator, the slab code can't be fed with an initial
chunk of memory. Some VM objects used early during startup now have to
be statically allocated, and kernel map entries need a special path to
avoid recursion when being allocated.

* vm/vm_map.c (vm_submap_object_store): New variable.
(vm_submap_object): Point to vm_submap_object_store.
(kentry_data_size): Initialize to arbitrary value.
(kentry_count): Remove variable.
(kentry_pagealloc): New function.
(vm_map_setup): Likewise.
(vm_map_create): Replace initialization with a call to vm_map_setup().
* vm/vm_map.h (vm_map_setup): New prototype.
* vm/vm_kern.c (kernel_map_store): New variable.
(kernel_map): Point to kernel_map_store.
(kmem_suballoc): Remove function. Replaced with...
(kmem_submap): New function.
(kmem_init): Call vm_map_setup() instead of vm_map_create().
* vm/vm_kern.h (kmem_suballoc): Remove prototype.
(kmem_submap): New prototype.
* vm/vm_object.c (kernel_object_store): New variable.
(kernel_object): Point to kernel_object_store.
(vm_object_template): Change type from vm_object_t to struct vm_object.
(_vm_object_setup): New function.
(_vm_object_allocate): Replace initialization with a call to
_vm_object_setup().
(vm_object_bootstrap): Don't allocate vm_object_template, and use it as
a structure instead of a pointer. Initialize kernel_object and
vm_submap_object with _vm_object_setup() instead of
_vm_object_allocate().
* vm/vm_resident.c (vm_page_bootstrap): Don't initialize
kentry_data_size.
* device/ds_routines.c (device_io_map_store): New variable.
(device_io_map): Point to device_io_map_store.
(mach_device_init): Call kmem_submap() instead of kmem_suballoc().
* ipc/ipc_init.c (ipc_kernel_map_store): New variable.
(ipc_kernel_map): Point to ipc_kernel_map_store.
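The recurring pattern in this change: where the zone allocator could
previously be seeded with an initial chunk of memory, each early-boot
object now gets compile-time backing storage, and the familiar
pointer-typed name is initialized to point at it, so callers need not
change. A small, runnable sketch of that pattern (struct object is a
stand-in for the real types such as struct vm_object; the _store naming
follows the ChangeLog):

    #include <stdio.h>

    /* Stand-in for a kernel type such as struct vm_object. */
    struct object {
    	int ref_count;
    };

    /* Backing storage reserved at compile time: usable before any
     * allocator has been bootstrapped. */
    static struct object kernel_object_store;

    /* Existing code keeps using the pointer-typed name, so only the
     * definition changes, not the callers. */
    struct object *kernel_object = &kernel_object_store;

    int main(void)
    {
    	kernel_object->ref_count = 1;
    	printf("kernel_object at %p, no allocator involved\n",
    	       (void *)kernel_object);
    	return 0;
    }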
Diffstat (limited to 'vm/vm_map.c')
-rw-r--r--  vm/vm_map.c | 72
1 file changed, 47 insertions(+), 25 deletions(-)
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 1cae7db..5015c1c 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -143,7 +143,8 @@ boolean_t vm_map_lookup_entry(); /* forward declaration */
* vm_map_submap creates the submap.
*/
-vm_object_t vm_submap_object;
+static struct vm_object vm_submap_object_store;
+vm_object_t vm_submap_object = &vm_submap_object_store;
/*
* vm_map_init:
@@ -160,16 +161,28 @@ vm_object_t vm_submap_object;
* vm_map_entry_cache: used to allocate map entries.
* vm_map_kentry_cache: used to allocate map entries for the kernel.
*
- * The kernel allocates map entries from a special zone that is initially
- * "crammed" with memory. It would be difficult (perhaps impossible) for
- * the kernel to allocate more memory to a entry zone when it became
- * empty since the very act of allocating memory implies the creation
- * of a new entry.
+ * Kernel map entries are allocated from a special cache, using a custom
+ * page allocation function to avoid recursion. It would be difficult
+ * (perhaps impossible) for the kernel to allocate more memory to an entry
+ * cache when it became empty since the very act of allocating memory
+ * implies the creation of a new entry.
*/
vm_offset_t kentry_data;
-vm_size_t kentry_data_size;
-int kentry_count = 256; /* to init kentry_data_size */
+vm_size_t kentry_data_size = 32 * PAGE_SIZE;
+
+static vm_offset_t kentry_pagealloc(vm_size_t size)
+{
+ vm_offset_t result;
+
+ if (size > kentry_data_size)
+ panic("vm_map: kentry memory exhausted");
+
+ result = kentry_data;
+ kentry_data += size;
+ kentry_data_size -= size;
+ return result;
+}
void vm_map_init(void)
{
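The new kentry_pagealloc() above is a plain bump allocator over the
statically reserved kentry_data region: allocation is a pointer
increment, so it can never recurse into map-entry allocation, and
memory is never returned. A standalone, runnable sketch of the same
scheme (all names here are illustrative, not from the kernel):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    /* Statically reserved region, analogous to kentry_data. */
    static char bootstrap_pool[32 * PAGE_SIZE];
    static char *pool_ptr = bootstrap_pool;
    static size_t pool_left = sizeof(bootstrap_pool);

    /* Bump allocation: no locks, no recursion, no free. */
    static void *pool_alloc(size_t size)
    {
    	void *result;

    	if (size > pool_left) {
    		fprintf(stderr, "bootstrap pool exhausted\n");
    		abort();	/* the kernel panics here instead */
    	}
    	result = pool_ptr;
    	pool_ptr += size;
    	pool_left -= size;
    	return result;
    }

    int main(void)
    {
    	void *page = pool_alloc(PAGE_SIZE);
    	printf("allocated one page at %p, %zu bytes left\n",
    	       page, pool_left);
    	return 0;
    }

The pool is deliberately one-way: the commit initializes
kentry_data_size to an arbitrary 32 pages, and exhausting it is simply
a panic, which is acceptable for a bootstrap-only reserve.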
@@ -189,6 +202,31 @@ void vm_map_init(void)
*/
}
+void vm_map_setup(map, pmap, min, max, pageable)
+ vm_map_t map;
+ pmap_t pmap;
+ vm_offset_t min, max;
+ boolean_t pageable;
+{
+ vm_map_first_entry(map) = vm_map_to_entry(map);
+ vm_map_last_entry(map) = vm_map_to_entry(map);
+ map->hdr.nentries = 0;
+ map->hdr.entries_pageable = pageable;
+
+ map->size = 0;
+ map->ref_count = 1;
+ map->pmap = pmap;
+ map->min_offset = min;
+ map->max_offset = max;
+ map->wiring_required = FALSE;
+ map->wait_for_space = FALSE;
+ map->first_free = vm_map_to_entry(map);
+ map->hint = vm_map_to_entry(map);
+ vm_map_lock_init(map);
+ simple_lock_init(&map->ref_lock);
+ simple_lock_init(&map->hint_lock);
+}
+
/*
* vm_map_create:
*
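Factoring the field initialization out of vm_map_create() into
vm_map_setup() separates "initialize a map" from "allocate a map",
which is what lets the statically allocated maps listed in the
ChangeLog (kernel_map_store, device_io_map_store, ipc_kernel_map_store)
exist before the map cache does. A kernel-context sketch of the
bootstrap path the message describes for vm/vm_kern.c; the kmem_init()
arguments shown are assumptions, not part of this diff:

    /* Storage for the kernel map itself, reserved at compile time. */
    static struct vm_map kernel_map_store;
    vm_map_t kernel_map = &kernel_map_store;

    void kmem_init(vm_offset_t start, vm_offset_t end)
    {
    	/* Initialize the static map in place; vm_map_create() would
    	 * have to allocate from a cache that doesn't exist yet. The
    	 * real function also reserves the [min, start) range already
    	 * handed out during early boot, elided in this sketch. */
    	vm_map_setup(kernel_map, pmap_kernel(),
    		     VM_MIN_KERNEL_ADDRESS, end, FALSE);
    }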
@@ -207,23 +245,7 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
if (result == VM_MAP_NULL)
panic("vm_map_create");
- vm_map_first_entry(result) = vm_map_to_entry(result);
- vm_map_last_entry(result) = vm_map_to_entry(result);
- result->hdr.nentries = 0;
- result->hdr.entries_pageable = pageable;
-
- result->size = 0;
- result->ref_count = 1;
- result->pmap = pmap;
- result->min_offset = min;
- result->max_offset = max;
- result->wiring_required = FALSE;
- result->wait_for_space = FALSE;
- result->first_free = vm_map_to_entry(result);
- result->hint = vm_map_to_entry(result);
- vm_map_lock_init(result);
- simple_lock_init(&result->ref_lock);
- simple_lock_init(&result->hint_lock);
+ vm_map_setup(result, pmap, min, max, pageable);
return(result);
}
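Elsewhere in the commit (not in this file), kmem_suballoc() is replaced
by kmem_submap(), which follows the same setup-in-place idea: the
submap is passed in by the caller instead of being created, so callers
such as mach_device_init() can hand in statically allocated maps like
device_io_map_store directly. The sketch below is a reconstruction from
the ChangeLog, not code from this diff; the signature, the
vm_map_enter() reservation, and the vm_map_submap() call are
assumptions about how such a function would plausibly look:

    /* Hedged reconstruction: carve a range out of the parent map,
     * then initialize the caller-provided (possibly static) map
     * over that range. */
    void kmem_submap(vm_map_t map, vm_map_t parent, vm_offset_t *min,
    		 vm_offset_t *max, vm_size_t size, boolean_t pageable)
    {
    	vm_offset_t addr;

    	size = round_page(size);
    	addr = vm_map_min(parent);

    	/* Reserve the address range in the parent map. */
    	if (vm_map_enter(parent, &addr, size, (vm_offset_t)0, TRUE,
    			 VM_OBJECT_NULL, (vm_offset_t)0, FALSE,
    			 VM_PROT_DEFAULT, VM_PROT_ALL,
    			 VM_INHERIT_DEFAULT) != KERN_SUCCESS)
    		panic("kmem_submap");

    	/* Unlike kmem_suballoc(), no vm_map_create(): the map is
    	 * initialized in place, so it may be statically allocated. */
    	vm_map_setup(map, vm_map_pmap(parent), addr, addr + size,
    		     pageable);

    	if (vm_map_submap(parent, addr, addr + size, map) != KERN_SUCCESS)
    		panic("kmem_submap");

    	*min = addr;
    	*max = addr + size;
    }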