-rw-r--r--  device/ds_routines.c | 11
-rw-r--r--  ipc/ipc_init.c       |  7
-rw-r--r--  vm/vm_kern.c         | 30
-rw-r--r--  vm/vm_kern.h         |  4
-rw-r--r--  vm/vm_map.c          | 72
-rw-r--r--  vm/vm_map.h          |  3
-rw-r--r--  vm/vm_object.c       | 87
-rw-r--r--  vm/vm_resident.c     |  1
8 files changed, 120 insertions(+), 95 deletions(-)
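The recurring pattern throughout this patch: maps and objects that must exist before the VM allocators are usable lose their dynamic allocation and instead get statically allocated backing storage, with the exported pointer fixed at compile time. A minimal sketch of the pattern, using the kernel_map conversion from vm/vm_kern.c below:

    /* Before: the pointer only becomes valid once vm_map_create()
     * has run, which itself requires a working allocator. */
    vm_map_t kernel_map;

    /* After: storage lives in the kernel image, the pointer is valid
     * from the start, and vm_map_setup() initializes it in place. */
    static struct vm_map kernel_map_store;
    vm_map_t kernel_map = &kernel_map_store;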
diff --git a/device/ds_routines.c b/device/ds_routines.c
index f0f8c59..5a6fdd2 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -130,7 +130,8 @@ static struct device_emulation_ops *emulation_list[] =
         &mach_device_emulation_ops,
 };
 
-vm_map_t        device_io_map;
+static struct vm_map    device_io_map_store;
+vm_map_t        device_io_map = &device_io_map_store;
 
 #define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0]))
 
@@ -1551,11 +1552,9 @@ void mach_device_init()
         queue_init(&io_done_list);
         simple_lock_init(&io_done_list_lock);
 
-        device_io_map = kmem_suballoc(kernel_map,
-                                      &device_io_min,
-                                      &device_io_max,
-                                      DEVICE_IO_MAP_SIZE,
-                                      FALSE);
+        kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max,
+                    DEVICE_IO_MAP_SIZE, FALSE);
+
         /*
          *      If the kernel receives many device_write requests, the
          *      device_io_map might run out of space.  To prevent
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
index 9b7e126..36d0f19 100644
--- a/ipc/ipc_init.c
+++ b/ipc/ipc_init.c
@@ -53,7 +53,8 @@
 
-vm_map_t ipc_kernel_map;
+static struct vm_map ipc_kernel_map_store;
+vm_map_t ipc_kernel_map = &ipc_kernel_map_store;
 vm_size_t ipc_kernel_map_size = 8 * 1024 * 1024;
 
 int ipc_space_max = SPACE_MAX;
@@ -117,8 +118,8 @@ ipc_init()
 {
         vm_offset_t min, max;
 
-        ipc_kernel_map = kmem_suballoc(kernel_map, &min, &max,
-                                       ipc_kernel_map_size, TRUE);
+        kmem_submap(ipc_kernel_map, kernel_map, &min, &max,
+                    ipc_kernel_map_size, TRUE);
 
         ipc_host_init();
 }
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index cfa33ff..fd46e98 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -58,7 +58,8 @@
  *      Variables exported by this module.
  */
 
-vm_map_t        kernel_map;
+static struct vm_map    kernel_map_store;
+vm_map_t        kernel_map = &kernel_map_store;
 vm_map_t        kernel_pageable_map;
 
 extern void kmem_alloc_pages();
@@ -811,27 +812,27 @@ kmem_remap_pages(object, offset, start, end, protection)
 }
 
 /*
- *      kmem_suballoc:
+ *      kmem_submap:
  *
- *      Allocates a map to manage a subrange
+ *      Initializes a map to manage a subrange
  *      of the kernel virtual address space.
  *
  *      Arguments are as follows:
  *
+ *      map             Map to initialize
  *      parent          Map to take range from
 *       size            Size of range to find
 *       min, max        Returned endpoints of map
 *       pageable        Can the region be paged
 */
-vm_map_t
-kmem_suballoc(parent, min, max, size, pageable)
-        vm_map_t parent;
+void
+kmem_submap(map, parent, min, max, size, pageable)
+        vm_map_t map, parent;
         vm_offset_t *min, *max;
         vm_size_t size;
         boolean_t pageable;
 {
-        vm_map_t map;
         vm_offset_t addr;
         kern_return_t kr;
 
@@ -850,20 +851,16 @@ kmem_suballoc(parent, min, max, size, pageable)
                 vm_submap_object, (vm_offset_t) 0, FALSE,
                 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
         if (kr != KERN_SUCCESS)
-                panic("kmem_suballoc");
+                panic("kmem_submap");
 
         pmap_reference(vm_map_pmap(parent));
-        map = vm_map_create(vm_map_pmap(parent), addr, addr + size, pageable);
-        if (map == VM_MAP_NULL)
-                panic("kmem_suballoc");
-
+        vm_map_setup(map, vm_map_pmap(parent), addr, addr + size, pageable);
         kr = vm_map_submap(parent, addr, addr + size, map);
         if (kr != KERN_SUCCESS)
-                panic("kmem_suballoc");
+                panic("kmem_submap");
 
         *min = addr;
         *max = addr + size;
-        return map;
 }
 
 /*
@@ -876,9 +873,8 @@ void kmem_init(start, end)
         vm_offset_t     start;
         vm_offset_t     end;
 {
-        kernel_map = vm_map_create(pmap_kernel(),
-                                   VM_MIN_KERNEL_ADDRESS, end,
-                                   FALSE);
+        vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
+                     FALSE);
 
         /*
          *      Reserve virtual memory allocated up to this time.
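Since kmem_suballoc() no longer exists, a subsystem that wants its own submap now supplies the map storage itself. A minimal caller sketch under the new API; my_map_store, my_map, and my_init are hypothetical names for illustration, not part of this patch:

    static struct vm_map my_map_store;      /* hypothetical storage */
    vm_map_t my_map = &my_map_store;

    void my_init(void)                      /* hypothetical init hook */
    {
        vm_offset_t min, max;

        /* Carve a 4 MB pageable range out of kernel_map, initialize
         * my_map over it via vm_map_setup(), and install it as a
         * submap; min/max return the chosen bounds. */
        kmem_submap(my_map, kernel_map, &min, &max,
                    4 * 1024 * 1024, TRUE);
    }

This mirrors the ipc_kernel_map and device_io_map conversions above.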
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index ca93d7a..22b7c12 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -58,8 +58,8 @@ extern kern_return_t    kmem_realloc(vm_map_t, vm_offset_t, vm_size_t,
                                      vm_offset_t *, vm_size_t);
 extern void             kmem_free(vm_map_t, vm_offset_t, vm_size_t);
 
-extern vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *,
-                              vm_size_t, boolean_t);
+extern void     kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
+                            vm_offset_t *, vm_size_t, boolean_t);
 
 extern kern_return_t    kmem_io_map_copyout(vm_map_t, vm_offset_t *,
                                             vm_offset_t *, vm_size_t *,
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 1cae7db..5015c1c 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -143,7 +143,8 @@ boolean_t       vm_map_lookup_entry();  /* forward declaration */
  *      vm_map_submap creates the submap.
  */
 
-vm_object_t     vm_submap_object;
+static struct vm_object vm_submap_object_store;
+vm_object_t     vm_submap_object = &vm_submap_object_store;
 
 /*
  *      vm_map_init:
@@ -160,16 +161,28 @@ vm_object_t     vm_submap_object;
  *      vm_map_entry_cache:     used to allocate map entries.
  *      vm_map_kentry_cache:    used to allocate map entries for the kernel.
  *
- *      The kernel allocates map entries from a special zone that is initially
- *      "crammed" with memory.  It would be difficult (perhaps impossible) for
- *      the kernel to allocate more memory to a entry zone when it became
- *      empty since the very act of allocating memory implies the creation
- *      of a new entry.
+ *      Kernel map entries are allocated from a special cache, using a custom
+ *      page allocation function to avoid recursion.  It would be difficult
+ *      (perhaps impossible) for the kernel to allocate more memory to an entry
+ *      cache when it became empty since the very act of allocating memory
+ *      implies the creation of a new entry.
  */
 
 vm_offset_t     kentry_data;
-vm_size_t       kentry_data_size;
-int             kentry_count = 256;     /* to init kentry_data_size */
+vm_size_t       kentry_data_size = 32 * PAGE_SIZE;
+
+static vm_offset_t kentry_pagealloc(vm_size_t size)
+{
+        vm_offset_t result;
+
+        if (size > kentry_data_size)
+                panic("vm_map: kentry memory exhausted");
+
+        result = kentry_data;
+        kentry_data += size;
+        kentry_data_size -= size;
+        return result;
+}
 
 void vm_map_init(void)
 {
@@ -189,6 +202,31 @@ void vm_map_init(void)
          */
 }
 
+void vm_map_setup(map, pmap, min, max, pageable)
+        vm_map_t        map;
+        pmap_t          pmap;
+        vm_offset_t     min, max;
+        boolean_t       pageable;
+{
+        vm_map_first_entry(map) = vm_map_to_entry(map);
+        vm_map_last_entry(map) = vm_map_to_entry(map);
+        map->hdr.nentries = 0;
+        map->hdr.entries_pageable = pageable;
+
+        map->size = 0;
+        map->ref_count = 1;
+        map->pmap = pmap;
+        map->min_offset = min;
+        map->max_offset = max;
+        map->wiring_required = FALSE;
+        map->wait_for_space = FALSE;
+        map->first_free = vm_map_to_entry(map);
+        map->hint = vm_map_to_entry(map);
+        vm_map_lock_init(map);
+        simple_lock_init(&map->ref_lock);
+        simple_lock_init(&map->hint_lock);
+}
+
 /*
  *      vm_map_create:
  *
@@ -207,23 +245,7 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
         if (result == VM_MAP_NULL)
                 panic("vm_map_create");
 
-        vm_map_first_entry(result) = vm_map_to_entry(result);
-        vm_map_last_entry(result) = vm_map_to_entry(result);
-        result->hdr.nentries = 0;
-        result->hdr.entries_pageable = pageable;
-
-        result->size = 0;
-        result->ref_count = 1;
-        result->pmap = pmap;
-        result->min_offset = min;
-        result->max_offset = max;
-        result->wiring_required = FALSE;
-        result->wait_for_space = FALSE;
-        result->first_free = vm_map_to_entry(result);
-        result->hint = vm_map_to_entry(result);
-        vm_map_lock_init(result);
-        simple_lock_init(&result->ref_lock);
-        simple_lock_init(&result->hint_lock);
+        vm_map_setup(result, pmap, min, max, pageable);
 
         return(result);
 }
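Note how kentry_pagealloc() replaces the old "crammed zone": it is a one-way bump allocator over a fixed boot-time reservation, so allocating a kernel map entry can never recurse into the map code. A behavioral sketch, assuming page-sized requests against the 32-page reservation:

    vm_offset_t first  = kentry_pagealloc(PAGE_SIZE);  /* start of kentry_data */
    vm_offset_t second = kentry_pagealloc(PAGE_SIZE);  /* first + PAGE_SIZE */
    /* Once the reservation is used up, the next request panics with
     * "vm_map: kentry memory exhausted"; pages are never handed back
     * here, and any reuse of freed entries happens at the cache layer. */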
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 567fe93..f4e9395 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -357,6 +357,9 @@ extern int      kentry_count;
 /* Initialize the module */
 extern void             vm_map_init(void);
 
+/* Initialize an empty map */
+extern void             vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t,
+                                     boolean_t);
 /* Create an empty map */
 extern vm_map_t         vm_map_create(pmap_t, vm_offset_t, vm_offset_t,
                                       boolean_t);
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 1c6e431..d80124a 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -147,7 +147,8 @@ struct kmem_cache       vm_object_cache; /* vm backing store cache */
  *      All wired-down kernel memory belongs to a single virtual
  *      memory object (kernel_object) to avoid wasting data structures.
  */
-vm_object_t     kernel_object;
+static struct vm_object kernel_object_store;
+vm_object_t     kernel_object = &kernel_object_store;
 
 /*
  *      Virtual memory objects that are not referenced by
@@ -198,7 +199,7 @@ decl_simple_lock_data(,vm_object_cached_lock_data)
  *      object structure, be sure to add initialization
  *      (see vm_object_init).
  */
-vm_object_t     vm_object_template;
+struct vm_object        vm_object_template;
 
 /*
  *      vm_object_allocate:
  *
@@ -206,6 +207,16 @@ vm_object_t     vm_object_template;
  *      Returns a new object with the given size.
  */
 
+static void _vm_object_setup(
+        vm_object_t     object,
+        vm_size_t       size)
+{
+        *object = vm_object_template;
+        queue_init(&object->memq);
+        vm_object_lock_init(object);
+        object->size = size;
+}
+
 vm_object_t _vm_object_allocate(
         vm_size_t               size)
 {
@@ -213,10 +224,7 @@ vm_object_t _vm_object_allocate(
 
         object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
 
-        *object = *vm_object_template;
-        queue_init(&object->memq);
-        vm_object_lock_init(object);
-        object->size = size;
+        _vm_object_setup(object, size);
 
         return object;
 }
@@ -254,53 +262,50 @@ void vm_object_bootstrap(void)
          *      Fill in a template object, for quick initialization
          */
 
-        vm_object_template = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
-        memset(vm_object_template, 0, sizeof *vm_object_template);
-
-        vm_object_template->ref_count = 1;
-        vm_object_template->size = 0;
-        vm_object_template->resident_page_count = 0;
-        vm_object_template->copy = VM_OBJECT_NULL;
-        vm_object_template->shadow = VM_OBJECT_NULL;
-        vm_object_template->shadow_offset = (vm_offset_t) 0;
+        vm_object_template.ref_count = 1;
+        vm_object_template.size = 0;
+        vm_object_template.resident_page_count = 0;
+        vm_object_template.copy = VM_OBJECT_NULL;
+        vm_object_template.shadow = VM_OBJECT_NULL;
+        vm_object_template.shadow_offset = (vm_offset_t) 0;
 
-        vm_object_template->pager = IP_NULL;
-        vm_object_template->paging_offset = 0;
-        vm_object_template->pager_request = PAGER_REQUEST_NULL;
-        vm_object_template->pager_name = IP_NULL;
+        vm_object_template.pager = IP_NULL;
+        vm_object_template.paging_offset = 0;
+        vm_object_template.pager_request = PAGER_REQUEST_NULL;
+        vm_object_template.pager_name = IP_NULL;
 
-        vm_object_template->pager_created = FALSE;
-        vm_object_template->pager_initialized = FALSE;
-        vm_object_template->pager_ready = FALSE;
+        vm_object_template.pager_created = FALSE;
+        vm_object_template.pager_initialized = FALSE;
+        vm_object_template.pager_ready = FALSE;
 
-        vm_object_template->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+        vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_NONE;
                 /* ignored if temporary, will be reset before
                  * permanent object becomes ready */
-        vm_object_template->use_shared_copy = FALSE;
-        vm_object_template->shadowed = FALSE;
-
-        vm_object_template->absent_count = 0;
-        vm_object_template->all_wanted = 0; /* all bits FALSE */
-
-        vm_object_template->paging_in_progress = 0;
-        vm_object_template->can_persist = FALSE;
-        vm_object_template->internal = TRUE;
-        vm_object_template->temporary = TRUE;
-        vm_object_template->alive = TRUE;
-        vm_object_template->lock_in_progress = FALSE;
-        vm_object_template->lock_restart = FALSE;
-        vm_object_template->use_old_pageout = TRUE; /* XXX change later */
-        vm_object_template->last_alloc = (vm_offset_t) 0;
+        vm_object_template.use_shared_copy = FALSE;
+        vm_object_template.shadowed = FALSE;
+
+        vm_object_template.absent_count = 0;
+        vm_object_template.all_wanted = 0; /* all bits FALSE */
+
+        vm_object_template.paging_in_progress = 0;
+        vm_object_template.can_persist = FALSE;
+        vm_object_template.internal = TRUE;
+        vm_object_template.temporary = TRUE;
+        vm_object_template.alive = TRUE;
+        vm_object_template.lock_in_progress = FALSE;
+        vm_object_template.lock_restart = FALSE;
+        vm_object_template.use_old_pageout = TRUE; /* XXX change later */
+        vm_object_template.last_alloc = (vm_offset_t) 0;
 
 #if     MACH_PAGEMAP
-        vm_object_template->existence_info = VM_EXTERNAL_NULL;
+        vm_object_template.existence_info = VM_EXTERNAL_NULL;
 #endif  /* MACH_PAGEMAP */
 
         /*
          *      Initialize the "kernel object"
          */
 
-        kernel_object = _vm_object_allocate(
+        _vm_object_setup(kernel_object,
                 VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
 
         /*
@@ -308,7 +313,7 @@ void vm_object_bootstrap(void)
          *      kernel object so that no limit is imposed on submap sizes.
          */
 
-        vm_submap_object = _vm_object_allocate(
+        _vm_object_setup(vm_submap_object,
                 VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
 
 #if     MACH_PAGEMAP
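The template object also changes representation: it is now a plain struct in BSS (zero-filled by the loader, making the old kmem_cache_alloc() plus memset() unnecessary), and defaults are applied by plain struct assignment. The subtlety _vm_object_setup() preserves is that per-object state must be rebuilt after the copy, since a queue head must point at its own storage and a lock must be initialized in place, so neither can be meaningfully copied from a template:

    *object = vm_object_template;   /* bulk-copy the defaults        */
    queue_init(&object->memq);      /* per-object queue head         */
    vm_object_lock_init(object);    /* per-object lock               */
    object->size = size;            /* the one caller-supplied field */

With setup factored out this way, the statically allocated kernel_object and vm_submap_object can be initialized in place without ever touching vm_object_cache.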
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 5a63ad4..ae71a74 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -238,7 +238,6 @@ void vm_page_bootstrap(
          *      Steal memory for the kernel map entries.
          */
 
-        kentry_data_size = kentry_count * sizeof(struct vm_map_entry);
         kentry_data = pmap_steal_memory(kentry_data_size);
 
         /*
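This final hunk pairs with the vm/vm_map.c change: kentry_data_size is now a compile-time initializer (32 * PAGE_SIZE, i.e. 128 KiB with 4 KiB pages), so vm_page_bootstrap() no longer derives it from kentry_count. The resulting boot flow, sketched under the assumption that the kernel map entry cache obtains its pages through kentry_pagealloc():

    /* Early boot (vm_resident.c): reserve the fixed region up front. */
    kentry_data = pmap_steal_memory(kentry_data_size);

    /* Later (vm_map.c): kernel map entries are carved out of that
     * reservation a chunk at a time, with no allocation recursion. */
    vm_offset_t chunk = kentry_pagealloc(PAGE_SIZE);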