Diffstat (limited to 'vm')
-rw-r--r-- | vm/memory_object_proxy.c |  20
-rw-r--r-- | vm/vm_external.c         |  43
-rw-r--r-- | vm/vm_fault.c            |  21
-rw-r--r-- | vm/vm_init.c             |   6
-rw-r--r-- | vm/vm_kern.c             |  30
-rw-r--r-- | vm/vm_kern.h             |   4
-rw-r--r-- | vm/vm_map.c              | 178
-rw-r--r-- | vm/vm_map.h              |   3
-rw-r--r-- | vm/vm_object.c           | 103
-rw-r--r-- | vm/vm_page.h             |   1
-rw-r--r-- | vm/vm_pageout.c          |   7
-rw-r--r-- | vm/vm_resident.c         |  23
12 files changed, 216 insertions, 223 deletions
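The recurring pattern in this commit: every zinit()-created zone becomes a statically declared kmem_cache initialized with kmem_cache_init(), and zalloc()/zfree() become kmem_cache_alloc()/kmem_cache_free(). A minimal sketch of that conversion follows, using a hypothetical struct foo client; the kmem_cache_init() parameter roles (name, object size, alignment, constructor, slab alloc/free functions, flags) are inferred from the calls visible in this patch:

    /* Before: a zone sized and limited up front. */
    zone_t foo_zone;

    foo_zone = zinit(sizeof(struct foo), 0,         /* object size, alignment */
                     16*1024*sizeof(struct foo),    /* max zone size */
                     sizeof(struct foo),            /* allocation quantum */
                     0, "foo zone");                /* flags, name */
    foo = (struct foo *) zalloc(foo_zone);
    zfree(foo_zone, (vm_offset_t) foo);

    /* After: a statically declared slab cache; no size limits to tune. */
    struct kmem_cache foo_cache;

    kmem_cache_init(&foo_cache, "foo", sizeof(struct foo), 0,
                    NULL, NULL, NULL, 0);
    foo = (struct foo *) kmem_cache_alloc(&foo_cache);
    kmem_cache_free(&foo_cache, (vm_offset_t) foo);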
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index fdab6e0..4fed312 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -41,15 +41,14 @@
 #include <mach/notify.h>
 #include <mach/vm_prot.h>
 #include <kern/printf.h>
-#include <kern/zalloc.h>
-#include <kern/mach_param.h>
+#include <kern/slab.h>
 #include <ipc/ipc_port.h>
 #include <ipc/ipc_space.h>
 #include <vm/memory_object_proxy.h>

-/* The zone which holds our proxy memory objects.  */
-static zone_t memory_object_proxy_zone;
+/* The cache which holds our proxy memory objects.  */
+static struct kmem_cache memory_object_proxy_cache;

 struct memory_object_proxy
 {
@@ -64,13 +63,8 @@ typedef struct memory_object_proxy *memory_object_proxy_t;
 void
 memory_object_proxy_init (void)
 {
-  /* For limit, see PORT_MAX.  */
-  memory_object_proxy_zone = zinit (sizeof (struct memory_object_proxy), 0,
-                                    (TASK_MAX * 3 + THREAD_MAX)
-                                    * sizeof (struct memory_object_proxy),
-                                    256 * sizeof (struct memory_object_proxy),
-                                    ZONE_EXHAUSTIBLE,
-                                    "proxy memory object zone");
+  kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
+                   sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
 }

 /* Lookup a proxy memory object by its port.  */
@@ -153,13 +147,13 @@ memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
   if (start[0] != 0 || len[0] != (vm_offset_t) ~0)
     return KERN_INVALID_ARGUMENT;

-  proxy = (memory_object_proxy_t) zalloc (memory_object_proxy_zone);
+  proxy = (memory_object_proxy_t) kmem_cache_alloc (&memory_object_proxy_cache);

   /* Allocate port, keeping a reference for it.  */
   proxy->port = ipc_port_alloc_kernel ();
   if (proxy->port == IP_NULL)
     {
-      zfree (memory_object_proxy_zone, (vm_offset_t) proxy);
+      kmem_cache_free (&memory_object_proxy_cache, (vm_offset_t) proxy);
       return KERN_RESOURCE_SHORTAGE;
     }

   /* Associate the port with the proxy memory object.  */
diff --git a/vm/vm_external.c b/vm/vm_external.c
index ac47faa..e9643ff 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -31,7 +31,7 @@
  */

 #include <mach/boolean.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/vm_external.h>
 #include <mach/vm_param.h>
 #include <kern/assert.h>
@@ -40,7 +40,7 @@

 boolean_t	vm_external_unsafe = FALSE;

-zone_t		vm_external_zone = ZONE_NULL;
+struct kmem_cache	vm_external_cache;

 /*
  *	The implementation uses bit arrays to record whether
@@ -52,8 +52,8 @@ zone_t		vm_external_zone = ZONE_NULL;
 #define		SMALL_SIZE	(VM_EXTERNAL_SMALL_SIZE/8)
 #define		LARGE_SIZE	(VM_EXTERNAL_LARGE_SIZE/8)

-zone_t		vm_object_small_existence_map_zone;
-zone_t		vm_object_large_existence_map_zone;
+struct kmem_cache	vm_object_small_existence_map_cache;
+struct kmem_cache	vm_object_large_existence_map_cache;

 vm_external_t	vm_external_create(size)
@@ -62,20 +62,17 @@ vm_external_t	vm_external_create(size)
 	vm_external_t	result;
 	vm_size_t	bytes;

-	if (vm_external_zone == ZONE_NULL)
-		return(VM_EXTERNAL_NULL);
-
-	result = (vm_external_t) zalloc(vm_external_zone);
+	result = (vm_external_t) kmem_cache_alloc(&vm_external_cache);
 	result->existence_map = (char *) 0;

 	bytes = (atop(size) + 07) >> 3;
 	if (bytes <= SMALL_SIZE) {
 		result->existence_map =
-			(char *) zalloc(vm_object_small_existence_map_zone);
+			(char *) kmem_cache_alloc(&vm_object_small_existence_map_cache);
 		result->existence_size = SMALL_SIZE;
 	} else if (bytes <= LARGE_SIZE) {
 		result->existence_map =
-			(char *) zalloc(vm_object_large_existence_map_zone);
+			(char *) kmem_cache_alloc(&vm_object_large_existence_map_cache);
 		result->existence_size = LARGE_SIZE;
 	}
 	return(result);
@@ -89,14 +86,14 @@ void		vm_external_destroy(e)

 	if (e->existence_map != (char *) 0) {
 		if (e->existence_size <= SMALL_SIZE) {
-			zfree(vm_object_small_existence_map_zone,
+			kmem_cache_free(&vm_object_small_existence_map_cache,
 				(vm_offset_t) e->existence_map);
 		} else {
-			zfree(vm_object_large_existence_map_zone,
+			kmem_cache_free(&vm_object_large_existence_map_cache,
 				(vm_offset_t) e->existence_map);
 		}
 	}
-	zfree(vm_external_zone, (vm_offset_t) e);
+	kmem_cache_free(&vm_external_cache, (vm_offset_t) e);
 }

 vm_external_state_t _vm_external_state_get(e, offset)
@@ -142,18 +139,14 @@ void		vm_external_module_initialize(void)
 {
 	vm_size_t	size = (vm_size_t) sizeof(struct vm_external);

-	vm_external_zone = zinit(size, 0, 16*1024*size, size,
-				 0, "external page bitmaps");
+	kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
+			NULL, NULL, NULL, 0);

-	vm_object_small_existence_map_zone = zinit(SMALL_SIZE, 0,
-					round_page(LARGE_SIZE * SMALL_SIZE),
-					round_page(SMALL_SIZE),
-					ZONE_EXHAUSTIBLE,
-					"object small existence maps");
+	kmem_cache_init(&vm_object_small_existence_map_cache,
+			"small_existence_map", SMALL_SIZE, 0,
+			NULL, NULL, NULL, 0);

-	vm_object_large_existence_map_zone = zinit(LARGE_SIZE, 0,
-					round_page(8 * LARGE_SIZE),
-					round_page(LARGE_SIZE),
-					ZONE_EXHAUSTIBLE,
-					"object large existence maps");
+	kmem_cache_init(&vm_object_large_existence_map_cache,
+			"large_existence_map", LARGE_SIZE, 0,
+			NULL, NULL, NULL, 0);
 }
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index cce043a..10955ed 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -51,9 +51,8 @@
 #include <mach/memory_object.h>
 #include <vm/memory_object_user.user.h>
 				/* For memory_object_data_{request,unlock} */
-#include <kern/mach_param.h>
 #include <kern/macro_help.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>

 #if	MACH_PCSAMPLE
 #include <kern/pc_sample.h>
@@ -85,7 +84,7 @@ typedef struct vm_fault_state {
 		vm_prot_t vmfp_access;
 } vm_fault_state_t;

-zone_t		vm_fault_state_zone = 0;
+struct kmem_cache	vm_fault_state_cache;

 int		vm_object_absent_max = 50;
@@ -107,10 +106,8 @@ extern struct db_watchpoint *db_watchpoint_list;
  */
 void vm_fault_init(void)
 {
-	vm_fault_state_zone = zinit(sizeof(vm_fault_state_t), 0,
-				    THREAD_MAX * sizeof(vm_fault_state_t),
-				    sizeof(vm_fault_state_t),
-				    0, "vm fault state");
+	kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
+			sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
 }

 /*
@@ -1206,12 +1203,12 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,

 		/*
 		 *	if this assignment stmt is written as
-		 *	'active_threads[cpu_number()] = zalloc()',
-		 *	cpu_number may be evaluated before zalloc;
-		 *	if zalloc blocks, cpu_number will be wrong
+		 *	'active_threads[cpu_number()] = kmem_cache_alloc()',
+		 *	cpu_number may be evaluated before kmem_cache_alloc;
+		 *	if kmem_cache_alloc blocks, cpu_number will be wrong
 		 */
-		state = (char *) zalloc(vm_fault_state_zone);
+		state = (char *) kmem_cache_alloc(&vm_fault_state_cache);
 		current_thread()->ith_other = state;
 	}
@@ -1490,7 +1487,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
 		register vm_fault_state_t *state =
 			(vm_fault_state_t *) current_thread()->ith_other;

-		zfree(vm_fault_state_zone, (vm_offset_t) state);
+		kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
 		(*continuation)(kr);
 		/*NOTREACHED*/
 	}
diff --git a/vm/vm_init.c b/vm/vm_init.c
index 33fca65..89eb098 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -35,7 +35,7 @@
  */

 #include <mach/machine/vm_types.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/kalloc.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_object.h>
@@ -67,12 +67,12 @@ void vm_mem_bootstrap()
 	 *	Initialize other VM packages
 	 */

-	zone_bootstrap();
+	slab_bootstrap();
 	vm_object_bootstrap();
 	vm_map_init();
 	kmem_init(start, end);
 	pmap_init();
-	zone_init();
+	slab_init();
 	kalloc_init();
 	vm_fault_init();
 	vm_page_module_init();
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index cfa33ff..fd46e98 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -58,7 +58,8 @@
 *	Variables exported by this module.
 */

-vm_map_t	kernel_map;
+static struct vm_map	kernel_map_store;
+vm_map_t	kernel_map = &kernel_map_store;
 vm_map_t	kernel_pageable_map;

 extern void kmem_alloc_pages();
@@ -811,27 +812,27 @@ kmem_remap_pages(object, offset, start, end, protection)
 }

 /*
- *	kmem_suballoc:
+ *	kmem_submap:
 *
- *	Allocates a map to manage a subrange
+ *	Initializes a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
+ *	map		Map to initialize
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *	pageable	Can the region be paged
 */
-vm_map_t
-kmem_suballoc(parent, min, max, size, pageable)
-	vm_map_t parent;
+void
+kmem_submap(map, parent, min, max, size, pageable)
+	vm_map_t map, parent;
 	vm_offset_t *min, *max;
 	vm_size_t size;
 	boolean_t pageable;
 {
-	vm_map_t map;
 	vm_offset_t addr;
 	kern_return_t kr;
@@ -850,20 +851,16 @@ kmem_suballoc(parent, min, max, size, pageable)
 			  vm_submap_object, (vm_offset_t) 0, FALSE,
 			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 	if (kr != KERN_SUCCESS)
-		panic("kmem_suballoc");
+		panic("kmem_submap");

 	pmap_reference(vm_map_pmap(parent));
-	map = vm_map_create(vm_map_pmap(parent), addr, addr + size, pageable);
-	if (map == VM_MAP_NULL)
-		panic("kmem_suballoc");
-
+	vm_map_setup(map, vm_map_pmap(parent), addr, addr + size, pageable);
 	kr = vm_map_submap(parent, addr, addr + size, map);
 	if (kr != KERN_SUCCESS)
-		panic("kmem_suballoc");
+		panic("kmem_submap");

 	*min = addr;
 	*max = addr + size;
-	return map;
 }

 /*
@@ -876,9 +873,8 @@ void kmem_init(start, end)
 	vm_offset_t	start;
 	vm_offset_t	end;
 {
-	kernel_map = vm_map_create(pmap_kernel(),
-				   VM_MIN_KERNEL_ADDRESS, end,
-				   FALSE);
+	vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
+		     FALSE);

 	/*
 	 *	Reserve virtual memory allocated up to this time.
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index ca93d7a..22b7c12 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -58,8 +58,8 @@ extern kern_return_t	kmem_realloc(vm_map_t, vm_offset_t, vm_size_t,
 				     vm_offset_t *, vm_size_t);
 extern void		kmem_free(vm_map_t, vm_offset_t, vm_size_t);

-extern vm_map_t	kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *,
-			      vm_size_t, boolean_t);
+extern void	kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
+			    vm_offset_t *, vm_size_t, boolean_t);

 extern kern_return_t	kmem_io_map_copyout(vm_map_t, vm_offset_t *,
 					    vm_offset_t *, vm_size_t *,
diff --git a/vm/vm_map.c b/vm/vm_map.c
index ce83403..de10eec 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -41,7 +41,8 @@
 #include <mach/vm_param.h>
 #include <kern/assert.h>
 #include <kern/debug.h>
-#include <kern/zalloc.h>
+#include <kern/kalloc.h>
+#include <kern/slab.h>
 #include <vm/pmap.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
@@ -70,7 +71,7 @@ void vm_map_copy_page_discard (vm_map_copy_t copy);
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero. vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
- *	wire count; it's used for map splitting and zone changing in
+ *	wire count; it's used for map splitting and cache changing in
 *	vm_map_copyout.
 */
 #define vm_map_entry_copy(NEW,OLD) \
@@ -130,10 +131,10 @@ MACRO_END
 *	vm_object_copy_strategically() in vm_object.c.
 */

-zone_t		vm_map_zone;		/* zone for vm_map structures */
-zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
-zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
-zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */
+struct kmem_cache	vm_map_cache;		/* cache for vm_map structures */
+struct kmem_cache	vm_map_entry_cache;	/* cache for vm_map_entry structures */
+struct kmem_cache	vm_map_kentry_cache;	/* cache for kernel entry structures */
+struct kmem_cache	vm_map_copy_cache;	/* cache for vm_map_copy structures */

 boolean_t	vm_map_lookup_entry();	/* forward declaration */
@@ -143,7 +144,8 @@ boolean_t	vm_map_lookup_entry();	/* forward declaration */
 *	vm_map_submap creates the submap.
 */

-vm_object_t	vm_submap_object;
+static struct vm_object	vm_submap_object_store;
+vm_object_t	vm_submap_object = &vm_submap_object_store;

 /*
 *	vm_map_init:
@@ -151,51 +153,81 @@ vm_object_t	vm_submap_object;
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
- *	Map and entry structures are allocated from zones -- we must
- *	initialize those zones.
+ *	Map and entry structures are allocated from caches -- we must
+ *	initialize those caches.
 *
- *	There are three zones of interest:
+ *	There are three caches of interest:
 *
- *	vm_map_zone:		used to allocate maps.
- *	vm_map_entry_zone:	used to allocate map entries.
- *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
+ *	vm_map_cache:		used to allocate maps.
+ *	vm_map_entry_cache:	used to allocate map entries.
+ *	vm_map_kentry_cache:	used to allocate map entries for the kernel.
 *
- *	The kernel allocates map entries from a special zone that is initially
- *	"crammed" with memory.  It would be difficult (perhaps impossible) for
- *	the kernel to allocate more memory to a entry zone when it became
- *	empty since the very act of allocating memory implies the creation
- *	of a new entry.
+ *	Kernel map entries are allocated from a special cache, using a custom
+ *	page allocation function to avoid recursion.  It would be difficult
+ *	(perhaps impossible) for the kernel to allocate more memory to an entry
+ *	cache when it became empty since the very act of allocating memory
+ *	implies the creation of a new entry.
 */

 vm_offset_t	kentry_data;
-vm_size_t	kentry_data_size;
-int		kentry_count = 256;		/* to init kentry_data_size */
+vm_size_t	kentry_data_size = 32 * PAGE_SIZE;

-void vm_map_init(void)
+static vm_offset_t kentry_pagealloc(vm_size_t size)
 {
-	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 0, 40*1024,
-					PAGE_SIZE, 0, "maps");
-	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
-					0, 1024*1024, PAGE_SIZE*5,
-					0, "non-kernel map entries");
-	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry), 0,
-					kentry_data_size, kentry_data_size,
-					ZONE_FIXED /* XXX */, "kernel map entries");
-
-	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
-					0, 16*1024, PAGE_SIZE, 0,
-					"map copies");
+	vm_offset_t result;

-	/*
-	 *	Cram the kentry zone with initial data.
-	 */
-	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
+	if (size > kentry_data_size)
+		panic("vm_map: kentry memory exhausted");
+
+	result = kentry_data;
+	kentry_data += size;
+	kentry_data_size -= size;
+	return result;
+}
+
+void vm_map_init(void)
+{
+	kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
+			NULL, NULL, NULL, 0);
+	kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
+			sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
+			sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
+			NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
+			      | KMEM_CACHE_NORECLAIM);
+	kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
+			sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);

 	/*
 	 *	Submap object is initialized by vm_object_init.
 	 */
 }

+void vm_map_setup(map, pmap, min, max, pageable)
+	vm_map_t	map;
+	pmap_t		pmap;
+	vm_offset_t	min, max;
+	boolean_t	pageable;
+{
+	vm_map_first_entry(map) = vm_map_to_entry(map);
+	vm_map_last_entry(map)  = vm_map_to_entry(map);
+	map->hdr.nentries = 0;
+	map->hdr.entries_pageable = pageable;
+
+	map->size = 0;
+	map->ref_count = 1;
+	map->pmap = pmap;
+	map->min_offset = min;
+	map->max_offset = max;
+	map->wiring_required = FALSE;
+	map->wait_for_space = FALSE;
+	map->first_free = vm_map_to_entry(map);
+	map->hint = vm_map_to_entry(map);
+	vm_map_lock_init(map);
+	simple_lock_init(&map->ref_lock);
+	simple_lock_init(&map->hint_lock);
+}
+
 /*
 *	vm_map_create:
 *
@@ -210,27 +242,11 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
 {
 	register vm_map_t	result;

-	result = (vm_map_t) zalloc(vm_map_zone);
+	result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
 	if (result == VM_MAP_NULL)
 		panic("vm_map_create");

-	vm_map_first_entry(result) = vm_map_to_entry(result);
-	vm_map_last_entry(result)  = vm_map_to_entry(result);
-	result->hdr.nentries = 0;
-	result->hdr.entries_pageable = pageable;
-
-	result->size = 0;
-	result->ref_count = 1;
-	result->pmap = pmap;
-	result->min_offset = min;
-	result->max_offset = max;
-	result->wiring_required = FALSE;
-	result->wait_for_space = FALSE;
-	result->first_free = vm_map_to_entry(result);
-	result->hint = vm_map_to_entry(result);
-	vm_map_lock_init(result);
-	simple_lock_init(&result->ref_lock);
-	simple_lock_init(&result->hint_lock);
+	vm_map_setup(result, pmap, min, max, pageable);

 	return(result);
 }
@@ -250,15 +266,15 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
 vm_map_entry_t _vm_map_entry_create(map_header)
 	register struct vm_map_header *map_header;
 {
-	register zone_t zone;
+	register kmem_cache_t cache;
 	register vm_map_entry_t	entry;

 	if (map_header->entries_pageable)
-		zone = vm_map_entry_zone;
+		cache = &vm_map_entry_cache;
 	else
-		zone = vm_map_kentry_zone;
+		cache = &vm_map_kentry_cache;

-	entry = (vm_map_entry_t) zalloc(zone);
+	entry = (vm_map_entry_t) kmem_cache_alloc(cache);
 	if (entry == VM_MAP_ENTRY_NULL)
 		panic("vm_map_entry_create");

@@ -280,14 +296,14 @@ void _vm_map_entry_dispose(map_header, entry)
 	register struct vm_map_header *map_header;
 	register vm_map_entry_t	entry;
 {
-	register zone_t		zone;
+	register kmem_cache_t	cache;

 	if (map_header->entries_pageable)
-		zone = vm_map_entry_zone;
+		cache = &vm_map_entry_cache;
 	else
-		zone = vm_map_kentry_zone;
+		cache = &vm_map_kentry_cache;

-	zfree(zone, (vm_offset_t) entry);
+	kmem_cache_free(cache, (vm_offset_t) entry);
 }

 /*
@@ -368,7 +384,7 @@ void vm_map_deallocate(map)

 	pmap_destroy(map->pmap);

-	zfree(vm_map_zone, (vm_offset_t) map);
+	kmem_cache_free(&vm_map_cache, (vm_offset_t) map);
 }

 /*
@@ -1907,7 +1923,7 @@ free_next_copy:
 			register vm_map_copy_t	new_copy;

 			new_copy = (vm_map_copy_t) copy->cpy_cont_args;
-			zfree(vm_map_copy_zone, (vm_offset_t) copy);
+			kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
 			copy = new_copy;
 			goto free_next_copy;
 		}
@@ -1918,7 +1934,7 @@ free_next_copy:
 		break;
 	}

-	zfree(vm_map_copy_zone, (vm_offset_t) copy);
+	kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
 }

 /*
@@ -1952,7 +1968,7 @@ vm_map_copy_copy(copy)
 	 *	from the old one into it.
 	 */

-	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
 	*new_copy = *copy;

 	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
@@ -2160,7 +2176,7 @@ start_pass_1:
 	/*
 	 *	XXXO	If there are no permanent objects in the destination,
-	 *	XXXO	and the source and destination map entry zones match,
+	 *	XXXO	and the source and destination map entry caches match,
 	 *	XXXO	and the destination map entry is not shared,
 	 *	XXXO	then the map entries can be deleted and replaced
 	 *	XXXO	with those from the copy.  The following code is the
@@ -2403,7 +2419,7 @@ start_pass_1:
 		((where)->vme_next = vm_map_copy_first_entry(copy))	\
 			->vme_prev = (where);				\
 	(map)->hdr.nentries += (copy)->cpy_hdr.nentries;		\
-	zfree(vm_map_copy_zone, (vm_offset_t) copy);			\
+	kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);	\
 MACRO_END

 /*
@@ -2459,7 +2475,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 				  VM_INHERIT_DEFAULT);
 		if (kr != KERN_SUCCESS)
 			return(kr);
-		zfree(vm_map_copy_zone, (vm_offset_t) copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
 		return(KERN_SUCCESS);
 	}

@@ -2516,15 +2532,15 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 	 *	Mismatches occur when dealing with the default
 	 *	pager.
 	 */
-	    zone_t		old_zone;
+	    kmem_cache_t	old_cache;
 	    vm_map_entry_t	next, new;

 	    /*
-	     *	Find the zone that the copies were allocated from
+	     *	Find the cache that the copies were allocated from
 	     */
-	    old_zone = (copy->cpy_hdr.entries_pageable)
-			? vm_map_entry_zone
-			: vm_map_kentry_zone;
+	    old_cache = (copy->cpy_hdr.entries_pageable)
+			? &vm_map_entry_cache
+			: &vm_map_kentry_cache;
 	    entry = vm_map_copy_first_entry(copy);

 	    /*
@@ -2547,7 +2563,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
 				vm_map_copy_last_entry(copy),
 				new);
 			next = entry->vme_next;
-			zfree(old_zone, (vm_offset_t) entry);
+			kmem_cache_free(old_cache, (vm_offset_t) entry);
 			entry = next;
 		}
 	}
@@ -3036,10 +3052,10 @@ error:
 	 *	Consume on success logic.
 	 */
 	if (copy != orig_copy) {
-		zfree(vm_map_copy_zone, (vm_offset_t) copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
 	}
 	if (result == KERN_SUCCESS) {
-		zfree(vm_map_copy_zone, (vm_offset_t) orig_copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy);
 	}

 	return(result);
@@ -3116,7 +3132,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
 	 *	remember the endpoints prior to rounding.
 	 */

-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
 	vm_map_copy_first_entry(copy) =
 	 vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
 	copy->type = VM_MAP_COPY_ENTRY_LIST;
@@ -3443,7 +3459,7 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
 	 *	and null links.
 	 */

-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
 	vm_map_copy_first_entry(copy) =
 	 vm_map_copy_last_entry(copy) = VM_MAP_ENTRY_NULL;
 	copy->type = VM_MAP_COPY_OBJECT;
@@ -3598,7 +3614,7 @@ kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
 	 *	be page-aligned.
 	 */

-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
 	copy->type = VM_MAP_COPY_PAGE_LIST;
 	copy->cpy_npages = 0;
 	copy->offset = src_addr;
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 567fe93..f4e9395 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -357,6 +357,9 @@ extern int	kentry_count;
 /* Initialize the module */
 extern void		vm_map_init(void);

+/* Initialize an empty map */
+extern void		vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t,
+				     boolean_t);
 /* Create an empty map */
 extern vm_map_t	vm_map_create(pmap_t, vm_offset_t, vm_offset_t, boolean_t);

diff --git a/vm/vm_object.c b/vm/vm_object.c
index 9057973..d80124a 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -47,7 +47,7 @@
 #include <kern/lock.h>
 #include <kern/queue.h>
 #include <kern/xpr.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/memory_object.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
@@ -141,13 +141,14 @@ void		vm_object_deactivate_pages(vm_object_t);
 *	ZZZ Continue this comment.
 */

-zone_t		vm_object_zone;		/* vm backing store zone */
+struct kmem_cache	vm_object_cache; /* vm backing store cache */

 /*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
-vm_object_t	kernel_object;
+static struct vm_object	kernel_object_store;
+vm_object_t	kernel_object = &kernel_object_store;

 /*
 *	Virtual memory objects that are not referenced by
@@ -198,7 +199,7 @@ decl_simple_lock_data(,vm_object_cached_lock_data)
 *		object	structure, be sure to add initialization
 *		(see vm_object_init).
 */
-vm_object_t	vm_object_template;
+struct vm_object	vm_object_template;

 /*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

+static void _vm_object_setup(
+	vm_object_t	object,
+	vm_size_t	size)
+{
+	*object = vm_object_template;
+	queue_init(&object->memq);
+	vm_object_lock_init(object);
+	object->size = size;
+}
+
 vm_object_t _vm_object_allocate(
 	vm_size_t		size)
 {
 	register vm_object_t object;

-	object = (vm_object_t) zalloc(vm_object_zone);
+	object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);

-	*object = *vm_object_template;
-	queue_init(&object->memq);
-	vm_object_lock_init(object);
-	object->size = size;
+	_vm_object_setup(object, size);

 	return object;
 }
@@ -244,10 +252,8 @@ vm_object_t vm_object_allocate(
 */
 void vm_object_bootstrap(void)
 {
-	vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object), 0,
-				round_page(512*1024),
-				round_page(12*1024),
-				0, "objects");
+	kmem_cache_init(&vm_object_cache, "vm_object",
+			sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);

 	queue_init(&vm_object_cached_list);
 	simple_lock_init(&vm_object_cached_lock_data);
@@ -256,53 +262,50 @@ void vm_object_bootstrap(void)
 	 *	Fill in a template object, for quick initialization
 	 */

-	vm_object_template = (vm_object_t) zalloc(vm_object_zone);
-	memset(vm_object_template, 0, sizeof *vm_object_template);
-
-	vm_object_template->ref_count = 1;
-	vm_object_template->size = 0;
-	vm_object_template->resident_page_count = 0;
-	vm_object_template->copy = VM_OBJECT_NULL;
-	vm_object_template->shadow = VM_OBJECT_NULL;
-	vm_object_template->shadow_offset = (vm_offset_t) 0;
+	vm_object_template.ref_count = 1;
+	vm_object_template.size = 0;
+	vm_object_template.resident_page_count = 0;
+	vm_object_template.copy = VM_OBJECT_NULL;
+	vm_object_template.shadow = VM_OBJECT_NULL;
+	vm_object_template.shadow_offset = (vm_offset_t) 0;

-	vm_object_template->pager = IP_NULL;
-	vm_object_template->paging_offset = 0;
-	vm_object_template->pager_request = PAGER_REQUEST_NULL;
-	vm_object_template->pager_name = IP_NULL;
+	vm_object_template.pager = IP_NULL;
+	vm_object_template.paging_offset = 0;
+	vm_object_template.pager_request = PAGER_REQUEST_NULL;
+	vm_object_template.pager_name = IP_NULL;

-	vm_object_template->pager_created = FALSE;
-	vm_object_template->pager_initialized = FALSE;
-	vm_object_template->pager_ready = FALSE;
+	vm_object_template.pager_created = FALSE;
+	vm_object_template.pager_initialized = FALSE;
+	vm_object_template.pager_ready = FALSE;

-	vm_object_template->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_NONE;
 		/* ignored if temporary, will be reset before
 		 * permanent object becomes ready */
-	vm_object_template->use_shared_copy = FALSE;
-	vm_object_template->shadowed = FALSE;
-
-	vm_object_template->absent_count = 0;
-	vm_object_template->all_wanted = 0; /* all bits FALSE */
-
-	vm_object_template->paging_in_progress = 0;
-	vm_object_template->can_persist = FALSE;
-	vm_object_template->internal = TRUE;
-	vm_object_template->temporary = TRUE;
-	vm_object_template->alive = TRUE;
-	vm_object_template->lock_in_progress = FALSE;
-	vm_object_template->lock_restart = FALSE;
-	vm_object_template->use_old_pageout = TRUE; /* XXX change later */
-	vm_object_template->last_alloc = (vm_offset_t) 0;
+	vm_object_template.use_shared_copy = FALSE;
+	vm_object_template.shadowed = FALSE;
+
+	vm_object_template.absent_count = 0;
+	vm_object_template.all_wanted = 0; /* all bits FALSE */
+
+	vm_object_template.paging_in_progress = 0;
+	vm_object_template.can_persist = FALSE;
+	vm_object_template.internal = TRUE;
+	vm_object_template.temporary = TRUE;
+	vm_object_template.alive = TRUE;
+	vm_object_template.lock_in_progress = FALSE;
+	vm_object_template.lock_restart = FALSE;
+	vm_object_template.use_old_pageout = TRUE; /* XXX change later */
+	vm_object_template.last_alloc = (vm_offset_t) 0;

 #if	MACH_PAGEMAP
-	vm_object_template->existence_info = VM_EXTERNAL_NULL;
+	vm_object_template.existence_info = VM_EXTERNAL_NULL;
 #endif	/* MACH_PAGEMAP */

 	/*
 	 *	Initialize the "kernel object"
 	 */

-	kernel_object = _vm_object_allocate(
+	_vm_object_setup(kernel_object,
 		VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);

 	/*
@@ -310,7 +313,7 @@ void vm_object_bootstrap(void)
 	 *	kernel object so that no limit is imposed on submap sizes.
 	 */

-	vm_submap_object = _vm_object_allocate(
+	_vm_object_setup(vm_submap_object,
 		VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);

 #if	MACH_PAGEMAP
@@ -660,7 +663,7 @@ void vm_object_terminate(
 	 *	Free the space for the object.
 	 */

-	zfree(vm_object_zone, (vm_offset_t) object);
+	kmem_cache_free(&vm_object_cache, (vm_offset_t) object);
 }

 /*
@@ -2618,7 +2621,7 @@ void vm_object_collapse(
 			vm_object_unlock(object);
 			if (old_name_port != IP_NULL)
 				ipc_port_dealloc_kernel(old_name_port);
-			zfree(vm_object_zone, (vm_offset_t) backing_object);
+			kmem_cache_free(&vm_object_cache, (vm_offset_t) backing_object);

 			object_collapses++;
diff --git a/vm/vm_page.h b/vm/vm_page.h
index f13b0af..4536d1c 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -41,7 +41,6 @@
 #include <vm/vm_types.h>
 #include <kern/queue.h>
 #include <kern/lock.h>
-#include <kern/zalloc.h>
 #include <kern/macro_help.h>
 #include <kern/sched_prim.h>	/* definitions of wait/wakeup */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 7a755bf..77c1cfe 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -43,6 +43,7 @@
 #include <mach/vm_statistics.h>
 #include <kern/counters.h>
 #include <kern/debug.h>
+#include <kern/slab.h>
 #include <kern/task.h>
 #include <kern/thread.h>
 #include <vm/pmap.h>
@@ -544,8 +545,8 @@ void vm_pageout_scan()
 	 *	into an internal object and then immediately double-page it,
 	 *	sending it to the default pager.
 	 *
-	 *	consider_zone_gc should be last, because the other operations
-	 *	might return memory to zones.  When we pause we use
+	 *	slab_collect should be last, because the other operations
+	 *	might return memory to caches.  When we pause we use
 	 *	vm_pageout_scan_continue as our continuation, so we will
 	 *	reenter vm_pageout_scan periodically and attempt to reclaim
 	 *	internal memory even if we never reach vm_page_free_target.
@@ -555,7 +556,7 @@ void vm_pageout_scan()
 	net_kmsg_collect();
 	consider_task_collect();
 	consider_thread_collect();
-	consider_zone_gc();
+	slab_collect();

 	for (burst_count = 0;;) {
 		register vm_page_t m;
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 96354a4..ae71a74 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -45,7 +45,7 @@
 #include <mach/vm_statistics.h>
 #include <machine/vm_param.h>
 #include <kern/xpr.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
@@ -58,10 +58,6 @@
 #include <vm/vm_user.h>
 #endif

-/* in zalloc.c XXX */
-extern vm_offset_t zdata;
-extern vm_size_t zdata_size;
-
 /*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
@@ -126,7 +122,7 @@ unsigned int	vm_page_free_count_minimum;	/* debugging */
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
-zone_t	vm_page_zone;
+struct kmem_cache	vm_page_cache;

 /*
 *	Fictitious pages don't have a physical address,
@@ -239,14 +235,11 @@ void vm_page_bootstrap(
 	vm_page_free_wanted = 0;

 	/*
-	 *	Steal memory for the zone system.
+	 *	Steal memory for the kernel map entries.
 	 */

-	kentry_data_size = kentry_count * sizeof(struct vm_map_entry);
 	kentry_data = pmap_steal_memory(kentry_data_size);

-	zdata = pmap_steal_memory(zdata_size);
-
 	/*
 	 *	Allocate (and initialize) the virtual-to-physical
 	 *	table hash buckets.
@@ -430,10 +423,8 @@ void pmap_startup(
 */
 void vm_page_module_init(void)
 {
-	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page), 0,
-			     VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
-			     PAGE_SIZE,
-			     0, "vm pages");
+	kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
+			NULL, NULL, NULL, 0);
 }

 /*
@@ -455,7 +446,7 @@ void vm_page_create(
 	for (paddr = round_page(start);
 	     paddr < trunc_page(end);
 	     paddr += PAGE_SIZE) {
-		m = (vm_page_t) zalloc(vm_page_zone);
+		m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
 		if (m == VM_PAGE_NULL)
 			panic("vm_page_create");

@@ -810,7 +801,7 @@ void vm_page_more_fictitious(void)
 	int i;

 	for (i = 0; i < vm_page_fictitious_quantum; i++) {
-		m = (vm_page_t) zalloc(vm_page_zone);
+		m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
 		if (m == VM_PAGE_NULL)
 			panic("vm_page_more_fictitious");
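A side effect worth noting: kernel_map, kernel_object and vm_submap_object are no longer allocated at run time but declared statically and initialized in place (via the new vm_map_setup() and _vm_object_setup()), since the caches that would back them do not exist yet when these objects are first needed. A hedged sketch of that bootstrap pattern, using hypothetical example_* names:

    /* Storage is reserved at compile time and set up in place,
       so no allocator involvement is required while the VM
       system is still coming up. */
    static struct vm_map example_map_store;
    vm_map_t example_map = &example_map_store;

    void example_bootstrap(void)
    {
            /* Initialize the preallocated map structure directly. */
            vm_map_setup(example_map, pmap_kernel(),
                         VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS,
                         FALSE);
    }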