author     Richard Braun <rbraun@sceen.net>   2011-12-17 15:24:05 +0000
committer  Richard Braun <rbraun@sceen.net>   2011-12-17 22:12:40 +0000
commit     db2078e4f1802434f791f4f1c333725c42fe172b (patch)
tree       39961f1164f12e1496a84d2f40451dcb78609b7e /vm
parent     7bc54a622e0c57a1085cd2990a1deedc8bd4743d (diff)
Adjust the kernel to use the slab allocator
* device/dev_lookup.c: Replace zalloc header, types and function calls
with their slab counterparts.
* device/dev_pager.c: Likewise.
* device/ds_routines.c: Likewise.
* device/io_req.h: Likewise.
* device/net_io.c: Likewise.
* i386/i386/fpu.c: Likewise.
* i386/i386/io_perm.c: Likewise.
* i386/i386/machine_task.c: Likewise.
* i386/i386/pcb.c: Likewise.
* i386/i386/task.h: Likewise.
* i386/intel/pmap.c: Likewise.
* i386/intel/pmap.h: Remove #include <kernel/zalloc.h>.
* include/mach_debug/mach_debug.defs (host_zone_info): Replace
routine declaration with skip directive.
(host_slab_info): New routine declaration.
* include/mach_debug/mach_debug_types.defs (zone_name_t)
(zone_name_array_t, zone_info_t, zone_info_array_t): Remove types.
(cache_info_t, cache_info_array_t): New types.
* include/mach_debug/mach_debug_types.h: Replace #include
<mach_debug/zone_info.h> with <mach_debug/slab_info.h>.
* ipc/ipc_entry.c: Replace zalloc header, types and function calls with
their slab counterparts.
* ipc/ipc_entry.h: Likewise.
* ipc/ipc_init.c: Likewise.
* ipc/ipc_marequest.c: Likewise.
* ipc/ipc_object.c: Likewise.
* ipc/ipc_object.h: Likewise.
* ipc/ipc_space.c: Likewise.
* ipc/ipc_space.h: Likewise.
* ipc/ipc_table.c (kalloc_map): Remove extern declaration.
* kern/act.c: Replace zalloc header, types and function calls with their
slab counterparts.
* kern/kalloc.h: Add #include <vm/vm_types.h>.
(MINSIZE): Remove definition.
(kalloc_map): Add extern declaration.
(kget): Remove prototype.
* kern/mach_clock.c: Adjust comment.
* kern/processor.c: Replace zalloc header, types and function calls with
their slab counterparts.
* kern/startup.c: Remove #include <kernel/zalloc.h>.
* kern/task.c: Replace zalloc header, types and function calls with
their slab counterparts.
* kern/thread.c: Likewise.
* vm/memory_object_proxy.c: Likewise.
* vm/vm_external.c: Likewise.
* vm/vm_fault.c: Likewise.
* vm/vm_init.c: Likewise.
* vm/vm_map.c: Likewise.
* vm/vm_object.c: Likewise.
* vm/vm_page.h: Remove #include <kernel/zalloc.h>.
* vm/vm_pageout.c: Replace zalloc header, types and function calls with
their slab counterparts.
* vm/vm_resident.c: Likewise.
(zdata, zdata_size): Remove declarations.
(vm_page_bootstrap): Don't steal memory for the zone system.
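
The conversion pattern applied throughout the diff below is mechanical: each fixed-size zone becomes a statically embedded struct kmem_cache, zinit() becomes kmem_cache_init(), and zalloc()/zfree() become kmem_cache_alloc()/kmem_cache_free(). Here is a minimal sketch of that pattern using a hypothetical example structure (not a type touched by this commit); the trailing NULL and 0 arguments to kmem_cache_init() select the allocator's defaults, exactly as the converted call sites below do.

#include <kern/slab.h>

/* Hypothetical object type; stands in for any fixed-size kernel structure.  */
struct example {
	int field;
};
typedef struct example *example_t;

/* Formerly a zone_t set up with zinit(); now an embedded cache.  */
static struct kmem_cache example_cache;

void example_init(void)
{
	/* cache, name, object size, alignment, then the constructor,
	   slab allocation/free hooks and flags left at their defaults,
	   matching the call sites in this commit.  */
	kmem_cache_init(&example_cache, "example",
			sizeof(struct example), 0, NULL, NULL, NULL, 0);
}

example_t example_alloc(void)
{
	/* zalloc(example_zone) becomes kmem_cache_alloc(&example_cache).  */
	return (example_t) kmem_cache_alloc(&example_cache);
}

void example_free(example_t e)
{
	/* zfree(zone, obj) becomes kmem_cache_free(cache, obj).  */
	kmem_cache_free(&example_cache, (vm_offset_t) e);
}

The one exception in this directory is vm_map_kentry_cache, which passes a custom page-allocation function (kentry_pagealloc) and the KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_NORECLAIM flags so that kernel map entries can still be served from the memory stolen at boot in vm_page_bootstrap().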
Diffstat (limited to 'vm')
-rw-r--r--  vm/memory_object_proxy.c   19
-rw-r--r--  vm/vm_external.c           43
-rw-r--r--  vm/vm_fault.c              20
-rw-r--r--  vm/vm_init.c                6
-rw-r--r--  vm/vm_map.c               105
-rw-r--r--  vm/vm_object.c             18
-rw-r--r--  vm/vm_page.h                1
-rw-r--r--  vm/vm_pageout.c             7
-rw-r--r--  vm/vm_resident.c           22
9 files changed, 105 insertions, 136 deletions
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index fdab6e0..9c0528c 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -41,15 +41,15 @@
 #include <mach/notify.h>
 #include <mach/vm_prot.h>
 #include <kern/printf.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/mach_param.h>
 #include <ipc/ipc_port.h>
 #include <ipc/ipc_space.h>
 #include <vm/memory_object_proxy.h>
-/* The zone which holds our proxy memory objects.  */
-static zone_t memory_object_proxy_zone;
+/* The cache which holds our proxy memory objects.  */
+static struct kmem_cache memory_object_proxy_cache;
 struct memory_object_proxy
 {
@@ -64,13 +64,8 @@ typedef struct memory_object_proxy *memory_object_proxy_t;
 void
 memory_object_proxy_init (void)
 {
-  /* For limit, see PORT_MAX.  */
-  memory_object_proxy_zone = zinit (sizeof (struct memory_object_proxy), 0,
-                                    (TASK_MAX * 3 + THREAD_MAX)
-                                    * sizeof (struct memory_object_proxy),
-                                    256 * sizeof (struct memory_object_proxy),
-                                    ZONE_EXHAUSTIBLE,
-                                    "proxy memory object zone");
+  kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
+                   sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
 }
 /* Lookup a proxy memory object by its port.  */
@@ -153,13 +148,13 @@ memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
   if (start[0] != 0 || len[0] != (vm_offset_t) ~0)
     return KERN_INVALID_ARGUMENT;
-  proxy = (memory_object_proxy_t) zalloc (memory_object_proxy_zone);
+  proxy = (memory_object_proxy_t) kmem_cache_alloc (&memory_object_proxy_cache);
   /* Allocate port, keeping a reference for it.  */
   proxy->port = ipc_port_alloc_kernel ();
   if (proxy->port == IP_NULL)
     {
-      zfree (memory_object_proxy_zone, (vm_offset_t) proxy);
+      kmem_cache_free (&memory_object_proxy_cache, (vm_offset_t) proxy);
       return KERN_RESOURCE_SHORTAGE;
     }
   /* Associate the port with the proxy memory object.  */
diff --git a/vm/vm_external.c b/vm/vm_external.c
index ac47faa..e9643ff 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -31,7 +31,7 @@
  */
 #include <mach/boolean.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/vm_external.h>
 #include <mach/vm_param.h>
 #include <kern/assert.h>
@@ -40,7 +40,7 @@
 boolean_t vm_external_unsafe = FALSE;
-zone_t vm_external_zone = ZONE_NULL;
+struct kmem_cache vm_external_cache;
 /*
  * The implementation uses bit arrays to record whether
@@ -52,8 +52,8 @@ zone_t vm_external_zone = ZONE_NULL;
 #define SMALL_SIZE (VM_EXTERNAL_SMALL_SIZE/8)
 #define LARGE_SIZE (VM_EXTERNAL_LARGE_SIZE/8)
-zone_t vm_object_small_existence_map_zone;
-zone_t vm_object_large_existence_map_zone;
+struct kmem_cache vm_object_small_existence_map_cache;
+struct kmem_cache vm_object_large_existence_map_cache;
 vm_external_t vm_external_create(size)
@@ -62,20 +62,17 @@ vm_external_t vm_external_create(size)
 	vm_external_t result;
 	vm_size_t bytes;
-	if (vm_external_zone == ZONE_NULL)
-		return(VM_EXTERNAL_NULL);
-
-	result = (vm_external_t) zalloc(vm_external_zone);
+	result = (vm_external_t) kmem_cache_alloc(&vm_external_cache);
 	result->existence_map = (char *) 0;
 	bytes = (atop(size) + 07) >> 3;
 	if (bytes <= SMALL_SIZE) {
 		result->existence_map =
-			(char *) zalloc(vm_object_small_existence_map_zone);
+			(char *) kmem_cache_alloc(&vm_object_small_existence_map_cache);
 		result->existence_size = SMALL_SIZE;
 	} else if (bytes <= LARGE_SIZE) {
 		result->existence_map =
-			(char *) zalloc(vm_object_large_existence_map_zone);
+			(char *) kmem_cache_alloc(&vm_object_large_existence_map_cache);
 		result->existence_size = LARGE_SIZE;
 	}
 	return(result);
@@ -89,14 +86,14 @@ void vm_external_destroy(e)
 	if (e->existence_map != (char *) 0) {
 		if (e->existence_size <= SMALL_SIZE) {
-			zfree(vm_object_small_existence_map_zone,
+			kmem_cache_free(&vm_object_small_existence_map_cache,
				(vm_offset_t) e->existence_map);
 		} else {
-			zfree(vm_object_large_existence_map_zone,
+			kmem_cache_free(&vm_object_large_existence_map_cache,
				(vm_offset_t) e->existence_map);
 		}
 	}
-	zfree(vm_external_zone, (vm_offset_t) e);
+	kmem_cache_free(&vm_external_cache, (vm_offset_t) e);
 }
 vm_external_state_t _vm_external_state_get(e, offset)
@@ -142,18 +139,14 @@ void vm_external_module_initialize(void)
 {
 	vm_size_t size = (vm_size_t) sizeof(struct vm_external);
-	vm_external_zone = zinit(size, 0, 16*1024*size, size,
-				 0, "external page bitmaps");
+	kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
+			NULL, NULL, NULL, 0);
-	vm_object_small_existence_map_zone = zinit(SMALL_SIZE, 0,
-					round_page(LARGE_SIZE * SMALL_SIZE),
-					round_page(SMALL_SIZE),
-					ZONE_EXHAUSTIBLE,
-					"object small existence maps");
+	kmem_cache_init(&vm_object_small_existence_map_cache,
+			"small_existence_map", SMALL_SIZE, 0,
+			NULL, NULL, NULL, 0);
-	vm_object_large_existence_map_zone = zinit(LARGE_SIZE, 0,
-					round_page(8 * LARGE_SIZE),
-					round_page(LARGE_SIZE),
-					ZONE_EXHAUSTIBLE,
-					"object large existence maps");
+	kmem_cache_init(&vm_object_large_existence_map_cache,
+			"large_existence_map", LARGE_SIZE, 0,
+			NULL, NULL, NULL, 0);
 }
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index cce043a..840f038 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -53,7 +53,7 @@
		/* For memory_object_data_{request,unlock} */
 #include <kern/mach_param.h>
 #include <kern/macro_help.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #if MACH_PCSAMPLE
 #include <kern/pc_sample.h>
@@ -85,7 +85,7 @@ typedef struct vm_fault_state {
	vm_prot_t vmfp_access;
 } vm_fault_state_t;
-zone_t		vm_fault_state_zone = 0;
+struct kmem_cache	vm_fault_state_cache;
 int		vm_object_absent_max = 50;
@@ -107,10 +107,8 @@ extern struct db_watchpoint *db_watchpoint_list;
  */
 void vm_fault_init(void)
 {
-	vm_fault_state_zone = zinit(sizeof(vm_fault_state_t), 0,
-				    THREAD_MAX * sizeof(vm_fault_state_t),
-				    sizeof(vm_fault_state_t),
-				    0, "vm fault state");
+	kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
+			sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
 }
 /*
@@ -1206,12 +1204,12 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
		/*
		 * if this assignment stmt is written as
-		 * 'active_threads[cpu_number()] = zalloc()',
-		 * cpu_number may be evaluated before zalloc;
-		 * if zalloc blocks, cpu_number will be wrong
+		 * 'active_threads[cpu_number()] = kmem_cache_alloc()',
+		 * cpu_number may be evaluated before kmem_cache_alloc;
+		 * if kmem_cache_alloc blocks, cpu_number will be wrong
		 */
-		state = (char *) zalloc(vm_fault_state_zone);
+		state = (char *) kmem_cache_alloc(&vm_fault_state_cache);
		current_thread()->ith_other = state;
	}
@@ -1490,7 +1488,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
		register vm_fault_state_t *state =
			(vm_fault_state_t *) current_thread()->ith_other;
-		zfree(vm_fault_state_zone, (vm_offset_t) state);
+		kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
		(*continuation)(kr);
		/*NOTREACHED*/
	}
diff --git a/vm/vm_init.c b/vm/vm_init.c
index 33fca65..89eb098 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -35,7 +35,7 @@
  */
 #include <mach/machine/vm_types.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/kalloc.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_object.h>
@@ -67,12 +67,12 @@ void vm_mem_bootstrap()
	 *	Initialize other VM packages
	 */
-	zone_bootstrap();
+	slab_bootstrap();
	vm_object_bootstrap();
	vm_map_init();
	kmem_init(start, end);
	pmap_init();
-	zone_init();
+	slab_init();
	kalloc_init();
	vm_fault_init();
	vm_page_module_init();
diff --git a/vm/vm_map.c b/vm/vm_map.c
index ce83403..1cae7db 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -41,7 +41,7 @@
 #include <mach/vm_param.h>
 #include <kern/assert.h>
 #include <kern/debug.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/pmap.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
@@ -70,7 +70,7 @@ void vm_map_copy_page_discard (vm_map_copy_t copy);
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero. vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
- *	wire count; it's used for map splitting and zone changing in
+ *	wire count; it's used for map splitting and cache changing in
 *	vm_map_copyout.
 */
 #define vm_map_entry_copy(NEW,OLD) \
@@ -130,10 +130,10 @@ MACRO_END
 *	vm_object_copy_strategically() in vm_object.c.
 */
-zone_t		vm_map_zone;		/* zone for vm_map structures */
-zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
-zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
-zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */
+struct kmem_cache	vm_map_cache;		/* cache for vm_map structures */
+struct kmem_cache	vm_map_entry_cache;	/* cache for vm_map_entry structures */
+struct kmem_cache	vm_map_kentry_cache;	/* cache for kernel entry structures */
+struct kmem_cache	vm_map_copy_cache;	/* cache for vm_map_copy structures */
 boolean_t	vm_map_lookup_entry();	/* forward declaration */
@@ -151,14 +151,14 @@ vm_object_t	vm_submap_object;
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
- *	Map and entry structures are allocated from zones -- we must
- *	initialize those zones.
+ *	Map and entry structures are allocated from caches -- we must
+ *	initialize those caches.
 *
- *	There are three zones of interest:
+ *	There are three caches of interest:
 *
- *	vm_map_zone:		used to allocate maps.
- *	vm_map_entry_zone:	used to allocate map entries.
- *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
+ *	vm_map_cache:		used to allocate maps.
+ *	vm_map_entry_cache:	used to allocate map entries.
+ *	vm_map_kentry_cache:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
@@ -173,23 +173,16 @@ int		kentry_count = 256;	/* to init kentry_data_size */
 void vm_map_init(void)
 {
-	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 0, 40*1024,
-			    PAGE_SIZE, 0, "maps");
-	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
-				  0, 1024*1024, PAGE_SIZE*5,
-				  0, "non-kernel map entries");
-	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry), 0,
-				   kentry_data_size, kentry_data_size,
-				   ZONE_FIXED /* XXX */, "kernel map entries");
-
-	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
-				 0, 16*1024, PAGE_SIZE, 0,
-				 "map copies");
-
-	/*
-	 *	Cram the kentry zone with initial data.
-	 */
-	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
+	kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
+			NULL, NULL, NULL, 0);
+	kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
+			sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
+			sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
+			NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
+			| KMEM_CACHE_NORECLAIM);
+	kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
+			sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
	/*
	 *	Submap object is initialized by vm_object_init.
@@ -210,7 +203,7 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
 {
	register vm_map_t	result;
-	result = (vm_map_t) zalloc(vm_map_zone);
+	result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");
@@ -250,15 +243,15 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
 vm_map_entry_t _vm_map_entry_create(map_header)
	register struct vm_map_header *map_header;
 {
-	register zone_t zone;
+	register kmem_cache_t cache;
	register vm_map_entry_t entry;
	if (map_header->entries_pageable)
-		zone = vm_map_entry_zone;
+		cache = &vm_map_entry_cache;
	else
-		zone = vm_map_kentry_zone;
+		cache = &vm_map_kentry_cache;
-	entry = (vm_map_entry_t) zalloc(zone);
+	entry = (vm_map_entry_t) kmem_cache_alloc(cache);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");
@@ -280,14 +273,14 @@ void _vm_map_entry_dispose(map_header, entry)
	register struct vm_map_header *map_header;
	register vm_map_entry_t entry;
 {
-	register zone_t zone;
+	register kmem_cache_t cache;
	if (map_header->entries_pageable)
-		zone = vm_map_entry_zone;
+		cache = &vm_map_entry_cache;
	else
-		zone = vm_map_kentry_zone;
+		cache = &vm_map_kentry_cache;
-	zfree(zone, (vm_offset_t) entry);
+	kmem_cache_free(cache, (vm_offset_t) entry);
 }
 /*
@@ -368,7 +361,7 @@ void vm_map_deallocate(map)
	pmap_destroy(map->pmap);
-	zfree(vm_map_zone, (vm_offset_t) map);
+	kmem_cache_free(&vm_map_cache, (vm_offset_t) map);
 }
 /*
@@ -1907,7 +1900,7 @@ free_next_copy:
			register vm_map_copy_t new_copy;
			new_copy = (vm_map_copy_t) copy->cpy_cont_args;
-			zfree(vm_map_copy_zone, (vm_offset_t) copy);
+			kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
			copy = new_copy;
			goto free_next_copy;
		}
@@ -1918,7 +1911,7 @@ free_next_copy:
		break;
	}
-	zfree(vm_map_copy_zone, (vm_offset_t) copy);
+	kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
 }
 /*
@@ -1952,7 +1945,7 @@ vm_map_copy_copy(copy)
	 *	from the old one into it.
	 */
-	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
	*new_copy = *copy;
	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
@@ -2160,7 +2153,7 @@ start_pass_1:
	/*
	 *	XXXO	If there are no permanent objects in the destination,
-	 *	XXXO	and the source and destination map entry zones match,
+	 *	XXXO	and the source and destination map entry caches match,
	 *	XXXO	and the destination map entry is not shared,
	 *	XXXO	then the map entries can be deleted and replaced
	 *	XXXO	with those from the copy.  The following code is the
@@ -2403,7 +2396,7 @@ start_pass_1:
	((where)->vme_next = vm_map_copy_first_entry(copy))		\
		->vme_prev = (where);					\
	(map)->hdr.nentries += (copy)->cpy_hdr.nentries;		\
-	zfree(vm_map_copy_zone, (vm_offset_t) copy);			\
+	kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);	\
 MACRO_END
 /*
@@ -2459,7 +2452,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
				   VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS)
			return(kr);
-		zfree(vm_map_copy_zone, (vm_offset_t) copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
		return(KERN_SUCCESS);
	}
@@ -2516,15 +2509,15 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
	 *	Mismatches occur when dealing with the default
	 *	pager.
	 */
-	zone_t		old_zone;
+	kmem_cache_t	old_cache;
	vm_map_entry_t	next, new;
	/*
-	 *	Find the zone that the copies were allocated from
+	 *	Find the cache that the copies were allocated from
	 */
-	old_zone = (copy->cpy_hdr.entries_pageable)
-			? vm_map_entry_zone
-			: vm_map_kentry_zone;
+	old_cache = (copy->cpy_hdr.entries_pageable)
+			? &vm_map_entry_cache
+			: &vm_map_kentry_cache;
	entry = vm_map_copy_first_entry(copy);
	/*
@@ -2547,7 +2540,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
				 vm_map_copy_last_entry(copy), new);
		next = entry->vme_next;
-		zfree(old_zone, (vm_offset_t) entry);
+		kmem_cache_free(old_cache, (vm_offset_t) entry);
		entry = next;
	}
 }
@@ -3036,10 +3029,10 @@ error:
	 *	Consume on success logic.
	 */
	if (copy != orig_copy) {
-		zfree(vm_map_copy_zone, (vm_offset_t) copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
	}
	if (result == KERN_SUCCESS) {
-		zfree(vm_map_copy_zone, (vm_offset_t) orig_copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy);
	}
	return(result);
@@ -3116,7 +3109,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
	 *	remember the endpoints prior to rounding.
	 */
-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
	vm_map_copy_first_entry(copy) =
	 vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
	copy->type = VM_MAP_COPY_ENTRY_LIST;
@@ -3443,7 +3436,7 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
	 *	and null links.
	 */
-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
	vm_map_copy_first_entry(copy) =
	 vm_map_copy_last_entry(copy) = VM_MAP_ENTRY_NULL;
	copy->type = VM_MAP_COPY_OBJECT;
@@ -3598,7 +3591,7 @@ kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
	 *	be page-aligned.
	 */
-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
	copy->type = VM_MAP_COPY_PAGE_LIST;
	copy->cpy_npages = 0;
	copy->offset = src_addr;
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 9057973..1c6e431 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -47,7 +47,7 @@
 #include <kern/lock.h>
 #include <kern/queue.h>
 #include <kern/xpr.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/memory_object.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
@@ -141,7 +141,7 @@ void		vm_object_deactivate_pages(vm_object_t);
 *	ZZZ Continue this comment.
 */
-zone_t		vm_object_zone;		/* vm backing store zone */
+struct kmem_cache	vm_object_cache; /* vm backing store cache */
 /*
 *	All wired-down kernel memory belongs to a single virtual
@@ -211,7 +211,7 @@ vm_object_t _vm_object_allocate(
 {
	register vm_object_t object;
-	object = (vm_object_t) zalloc(vm_object_zone);
+	object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
	*object = *vm_object_template;
	queue_init(&object->memq);
@@ -244,10 +244,8 @@ vm_object_t vm_object_allocate(
 */
 void vm_object_bootstrap(void)
 {
-	vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object), 0,
-				round_page(512*1024),
-				round_page(12*1024),
-				0, "objects");
+	kmem_cache_init(&vm_object_cache, "vm_object",
+			sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
	queue_init(&vm_object_cached_list);
	simple_lock_init(&vm_object_cached_lock_data);
@@ -256,7 +254,7 @@ void vm_object_bootstrap(void)
	 *	Fill in a template object, for quick initialization
	 */
-	vm_object_template = (vm_object_t) zalloc(vm_object_zone);
+	vm_object_template = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
	memset(vm_object_template, 0, sizeof *vm_object_template);
	vm_object_template->ref_count = 1;
@@ -660,7 +658,7 @@ void vm_object_terminate(
	 *	Free the space for the object.
	 */
-	zfree(vm_object_zone, (vm_offset_t) object);
+	kmem_cache_free(&vm_object_cache, (vm_offset_t) object);
 }
 /*
@@ -2618,7 +2616,7 @@ void vm_object_collapse(
		vm_object_unlock(object);
		if (old_name_port != IP_NULL)
			ipc_port_dealloc_kernel(old_name_port);
-		zfree(vm_object_zone, (vm_offset_t) backing_object);
+		kmem_cache_free(&vm_object_cache, (vm_offset_t) backing_object);
		vm_object_lock(object);
		object_collapses++;
diff --git a/vm/vm_page.h b/vm/vm_page.h
index f13b0af..4536d1c 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -41,7 +41,6 @@
 #include <vm/vm_types.h>
 #include <kern/queue.h>
 #include <kern/lock.h>
-#include <kern/zalloc.h>
 #include <kern/macro_help.h>
 #include <kern/sched_prim.h>	/* definitions of wait/wakeup */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 7a755bf..77c1cfe 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -43,6 +43,7 @@
 #include <mach/vm_statistics.h>
 #include <kern/counters.h>
 #include <kern/debug.h>
+#include <kern/slab.h>
 #include <kern/task.h>
 #include <kern/thread.h>
 #include <vm/pmap.h>
@@ -544,8 +545,8 @@ void vm_pageout_scan()
	 *	into an internal object and then immediately double-page it,
	 *	sending it to the default pager.
	 *
-	 *	consider_zone_gc should be last, because the other operations
-	 *	might return memory to zones.  When we pause we use
+	 *	slab_collect should be last, because the other operations
+	 *	might return memory to caches.  When we pause we use
	 *	vm_pageout_scan_continue as our continuation, so we will
	 *	reenter vm_pageout_scan periodically and attempt to reclaim
	 *	internal memory even if we never reach vm_page_free_target.
@@ -555,7 +556,7 @@ void vm_pageout_scan()
	net_kmsg_collect();
	consider_task_collect();
	consider_thread_collect();
-	consider_zone_gc();
+	slab_collect();
	for (burst_count = 0;;) {
		register vm_page_t m;
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 96354a4..5a63ad4 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -45,7 +45,7 @@
 #include <mach/vm_statistics.h>
 #include <machine/vm_param.h>
 #include <kern/xpr.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
@@ -58,10 +58,6 @@
 #include <vm/vm_user.h>
 #endif
-/* in zalloc.c XXX */
-extern vm_offset_t zdata;
-extern vm_size_t zdata_size;
-
 /*
 *	Associated with eacn page of user-allocatable memory is a
 *	page structure.
@@ -126,7 +122,7 @@ unsigned int	vm_page_free_count_minimum;	/* debugging */
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
-zone_t	vm_page_zone;
+struct kmem_cache	vm_page_cache;
 /*
 *	Fictitious pages don't have a physical address,
@@ -239,14 +235,12 @@ void vm_page_bootstrap(
	vm_page_free_wanted = 0;
	/*
-	 *	Steal memory for the zone system.
+	 *	Steal memory for the kernel map entries.
	 */
	kentry_data_size = kentry_count * sizeof(struct vm_map_entry);
	kentry_data = pmap_steal_memory(kentry_data_size);
-	zdata = pmap_steal_memory(zdata_size);
-
	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
@@ -430,10 +424,8 @@ void pmap_startup(
 */
 void		vm_page_module_init(void)
 {
-	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page), 0,
-			     VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
-			     PAGE_SIZE,
-			     0, "vm pages");
+	kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
+			NULL, NULL, NULL, 0);
 }
 /*
@@ -455,7 +447,7 @@ void vm_page_create(
	for (paddr = round_page(start);
	     paddr < trunc_page(end);
	     paddr += PAGE_SIZE) {
-		m = (vm_page_t) zalloc(vm_page_zone);
+		m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
		if (m == VM_PAGE_NULL)
			panic("vm_page_create");
@@ -810,7 +802,7 @@ void vm_page_more_fictitious(void)
	int i;
	for (i = 0; i < vm_page_fictitious_quantum; i++) {
-		m = (vm_page_t) zalloc(vm_page_zone);
+		m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
		if (m == VM_PAGE_NULL)
			panic("vm_page_more_fictitious");