40 files changed, 281 insertions, 372 deletions
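Every hunk below applies the same mechanical substitution, so it is worth spelling out once. The old allocator handed back a dynamically created zone_t and required a per-zone maximum size, an allocation chunk size, and flags such as ZONE_EXHAUSTIBLE or ZONE_COLLECTABLE; the new kmem_cache descriptor is embedded statically in the client module, and kmem_cache_init() takes only the object size and alignment, with the remaining arguments (by all appearances a constructor and custom slab alloc/free hooks, judging from the vm_map_kentry_cache call further down) left NULL and the flags 0 in the common case. Note that the per-zone memory limits do not carry over; call sites nevertheless keep their existing NULL checks, since kmem_cache_alloc() can still fail under memory pressure. A minimal before/after sketch, using a hypothetical foo module rather than any file from this commit:

    #include <kern/slab.h>

    struct foo {
            int x;
    };

    struct kmem_cache foo_cache;            /* was: zone_t foo_zone; */

    void foo_module_init(void)
    {
            /* was: foo_zone = zinit(sizeof(struct foo), 0, max, chunk,
                                     flags, "foo"); */
            kmem_cache_init(&foo_cache, "foo", sizeof(struct foo), 0,
                            NULL, NULL, NULL, 0);
    }

    struct foo *foo_alloc(void)
    {
            /* was: (struct foo *) zalloc(foo_zone) */
            return (struct foo *) kmem_cache_alloc(&foo_cache);
    }

    void foo_free(struct foo *f)
    {
            /* was: zfree(foo_zone, (vm_offset_t) f) */
            kmem_cache_free(&foo_cache, (vm_offset_t) f);
    }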
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index 2391e8d..98a2d02 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -32,7 +32,7 @@
 #include <mach/vm_param.h>
 
 #include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 
 #include <device/device_types.h>
 #include <device/dev_hdr.h>
@@ -62,7 +62,7 @@
 queue_head_t	dev_number_hash_table[NDEVHASH];
 decl_simple_lock_data(,	dev_number_lock)
 
-zone_t		dev_hdr_zone;
+struct kmem_cache	dev_hdr_cache;
 
 /*
  *	Enter device in the number lookup table.
@@ -151,7 +151,7 @@ device_lookup(name)
 	simple_unlock(&dev_number_lock);
 
-	new_device = (mach_device_t) zalloc(dev_hdr_zone);
+	new_device = (mach_device_t) kmem_cache_alloc(&dev_hdr_cache);
 	simple_lock_init(&new_device->ref_lock);
 	new_device->ref_count = 1;
 	simple_lock_init(&new_device->lock);
@@ -187,7 +187,7 @@ device_lookup(name)
 	simple_unlock(&dev_number_lock);
 
 	if (new_device != MACH_DEVICE_NULL)
-	    zfree(dev_hdr_zone, (vm_offset_t)new_device);
+	    kmem_cache_free(&dev_hdr_cache, (vm_offset_t)new_device);
     }
 
     return (device);
@@ -233,7 +233,7 @@ mach_device_deallocate(device)
 	simple_unlock(&device->ref_lock);
 	simple_unlock(&dev_number_lock);
 
-	zfree(dev_hdr_zone, (vm_offset_t)device);
+	kmem_cache_free(&dev_hdr_cache, (vm_offset_t)device);
 }
 
 /*
@@ -376,9 +376,6 @@ dev_lookup_init()
 	simple_lock_init(&dev_port_lock);
 
-	dev_hdr_zone = zinit(sizeof(struct mach_device), 0,
-			     sizeof(struct mach_device) * NDEVICES,
-			     PAGE_SIZE,
-			     FALSE,
-			     "open device entry");
+	kmem_cache_init(&dev_hdr_cache, "mach_device",
+			sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
 }
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 447781e..dc5ba73 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -44,7 +44,7 @@
 #include <kern/debug.h>
 #include <kern/printf.h>
 #include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/kalloc.h>
 
 #include <vm/vm_page.h>
@@ -126,7 +126,7 @@
 typedef struct dev_pager *dev_pager_t;
 #define	DEV_PAGER_NULL	((dev_pager_t)0)
 
-zone_t		dev_pager_zone;
+struct kmem_cache	dev_pager_cache;
 
 void dev_pager_reference(register dev_pager_t	ds)
 {
@@ -144,7 +144,7 @@ void dev_pager_deallocate(register dev_pager_t	ds)
 	}
 
 	simple_unlock(&ds->lock);
-	zfree(dev_pager_zone, (vm_offset_t)ds);
+	kmem_cache_free(&dev_pager_cache, (vm_offset_t)ds);
 }
 
 /*
@@ -161,7 +161,7 @@ struct dev_pager_entry {
 typedef struct dev_pager_entry *dev_pager_entry_t;
 
 queue_head_t	dev_pager_hashtable[DEV_PAGER_HASH_COUNT];
-zone_t		dev_pager_hash_zone;
+struct kmem_cache	dev_pager_hash_cache;
 decl_simple_lock_data(,	dev_pager_hash_lock)
 
@@ -174,13 +174,8 @@ void dev_pager_hash_init(void)
 	register vm_size_t	size;
 
 	size = sizeof(struct dev_pager_entry);
-	dev_pager_hash_zone = zinit(
-				size,
-				0,
-				size * 1000,
-				PAGE_SIZE,
-				FALSE,
-				"dev_pager port hash");
+	kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0,
+			NULL, NULL, NULL, 0);
 	for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
 	    queue_init(&dev_pager_hashtable[i]);
 	simple_lock_init(&dev_pager_hash_lock);
@@ -192,7 +187,7 @@ void dev_pager_hash_insert(
 {
 	register dev_pager_entry_t new_entry;
 
-	new_entry = (dev_pager_entry_t) zalloc(dev_pager_hash_zone);
+	new_entry = (dev_pager_entry_t) kmem_cache_alloc(&dev_pager_hash_cache);
 	new_entry->name = name_port;
 	new_entry->pager_rec = rec;
 
@@ -220,7 +215,7 @@ void dev_pager_hash_delete(ipc_port_t	name_port)
 	}
 	simple_unlock(&dev_pager_hash_lock);
 	if (entry)
-	    zfree(dev_pager_hash_zone, (vm_offset_t)entry);
+	    kmem_cache_free(&dev_pager_hash_cache, (vm_offset_t)entry);
 }
 dev_pager_t dev_pager_hash_lookup(ipc_port_t	name_port)
@@ -273,7 +268,7 @@ kern_return_t	device_pager_setup(
 	    return (D_SUCCESS);
 	}
 
-	d = (dev_pager_t) zalloc(dev_pager_zone);
+	d = (dev_pager_t) kmem_cache_alloc(&dev_pager_cache);
 	if (d == DEV_PAGER_NULL)
 	    return (KERN_RESOURCE_SHORTAGE);
 
@@ -726,15 +721,11 @@ void device_pager_init(void)
 	register vm_size_t	size;
 
 	/*
-	 * Initialize zone of paging structures.
+	 * Initialize cache of paging structures.
 	 */
 	size = sizeof(struct dev_pager);
-	dev_pager_zone = zinit(size,
-			       0,
-			       (vm_size_t) size * 1000,
-			       PAGE_SIZE,
-			       FALSE,
-			       "device pager structures");
+	kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
+			NULL, NULL, NULL, 0);
 
 	/*
	 * Initialize the name port hashing stuff.
diff --git a/device/ds_routines.c b/device/ds_routines.c
index d4a08fb..f0f8c59 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -73,7 +73,7 @@
 #include <kern/debug.h>
 #include <kern/printf.h>
 #include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/thread.h>
 #include <kern/task.h>
 #include <kern/sched_prim.h>
@@ -855,7 +855,7 @@ device_write_get(ior, wait)
	 */
	if (ior->io_op & IO_INBAND) {
	    assert(ior->io_count <= sizeof (io_buf_ptr_inband_t));
-	    new_addr = zalloc(io_inband_zone);
+	    new_addr = kmem_cache_alloc(&io_inband_cache);
	    memcpy((void*)new_addr, ior->io_data, ior->io_count);
	    ior->io_data = (io_buf_ptr_t)new_addr;
	    ior->io_alloc_size = sizeof (io_buf_ptr_inband_t);
@@ -935,7 +935,7 @@ device_write_dealloc(ior)
	 * Inband case.
	 */
	if (ior->io_op & IO_INBAND) {
-	    zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+	    kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
	    return (TRUE);
	}
@@ -1245,7 +1245,7 @@ kern_return_t device_read_alloc(ior, size)
	    return (KERN_SUCCESS);
 
	if (ior->io_op & IO_INBAND) {
-	    ior->io_data = (io_buf_ptr_t) zalloc(io_inband_zone);
+	    ior->io_data = (io_buf_ptr_t) kmem_cache_alloc(&io_inband_cache);
	    ior->io_alloc_size = sizeof(io_buf_ptr_inband_t);
	} else {
	    size = round_page(size);
@@ -1338,7 +1338,7 @@ boolean_t ds_read_done(ior)
	if (ior->io_count != 0) {
	    if (ior->io_op & IO_INBAND) {
		if (ior->io_alloc_size > 0)
-		    zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+		    kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
	    } else {
		register vm_offset_t end_alloc;
@@ -1575,11 +1575,8 @@ void mach_device_init()
	 */
	device_io_map->wait_for_space = TRUE;
 
-	io_inband_zone = zinit(sizeof(io_buf_ptr_inband_t), 0,
-			    1000 * sizeof(io_buf_ptr_inband_t),
-			    10 * sizeof(io_buf_ptr_inband_t),
-			    FALSE,
-			    "io inband read buffers");
+	kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
+			sizeof(io_buf_ptr_inband_t), 0, NULL, NULL, NULL, 0);
 
	mach_device_trap_init();
 }
@@ -1615,7 +1612,7 @@ void iowait(ior)
 */
 #define IOTRAP_REQSIZE 2048
 
-zone_t io_trap_zone;
+struct kmem_cache io_trap_cache;
 
 /*
 * Initialization.  Called from mach_device_init().
@@ -1623,24 +1620,21 @@ zone_t io_trap_zone;
 static void
 mach_device_trap_init(void)
 {
-	io_trap_zone = zinit(IOTRAP_REQSIZE, 0,
-			     256 * IOTRAP_REQSIZE,
-			     16 * IOTRAP_REQSIZE,
-			     FALSE,
-			     "wired device trap buffers");
+	kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
+			NULL, NULL, NULL, 0);
 }
 
 /*
 * Allocate an io_req_t.
- * Currently zalloc's from io_trap_zone.
+ * Currently allocates from io_trap_cache.
 *
- * Could have lists of different size zones.
+ * Could have lists of different size caches.
 * Could call a device-specific routine.
 */
 io_req_t
 ds_trap_req_alloc(mach_device_t device, vm_size_t data_size)
 {
-	return (io_req_t) zalloc(io_trap_zone);
+	return (io_req_t) kmem_cache_alloc(&io_trap_cache);
 }
 
 /*
@@ -1656,7 +1650,7 @@ ds_trap_write_done(io_req_t ior)
	/*
	 * Should look at reply port and maybe send a message.
	 */
-	zfree(io_trap_zone, (vm_offset_t) ior);
+	kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
 
	/*
	 * Give up device reference from ds_write_trap.
@@ -1732,7 +1726,7 @@ device_write_trap (mach_device_t device, dev_mode_t mode,
	 */
	mach_device_deallocate(device);
 
-	zfree(io_trap_zone, (vm_offset_t) ior);
+	kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
	return (result);
 }
@@ -1823,7 +1817,7 @@ device_writev_trap (mach_device_t device, dev_mode_t mode,
	 */
	mach_device_deallocate(device);
 
-	zfree(io_trap_zone, (vm_offset_t) ior);
+	kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
	return (result);
 }
diff --git a/device/io_req.h b/device/io_req.h
index 162524d..65e23e6 100644
--- a/device/io_req.h
+++ b/device/io_req.h
@@ -35,6 +35,7 @@
 #include <mach/port.h>
 #include <mach/message.h>
 #include <mach/vm_param.h>
+#include <kern/slab.h>
 #include <kern/kalloc.h>
 #include <kern/lock.h>
 #include <vm/vm_page.h>
@@ -124,7 +125,7 @@ struct io_req {
 void iodone(io_req_t);
 
 /*
- * Macros to allocate and free IORs - will convert to zones later.
+ * Macros to allocate and free IORs - will convert to caches later.
 */
 #define io_req_alloc(ior,size)					\
	MACRO_BEGIN						\
@@ -136,6 +137,6 @@ void iodone(io_req_t);
	(kfree((vm_offset_t)(ior), sizeof(struct io_req)))
 
-zone_t io_inband_zone; /* for inband reads */
+struct kmem_cache io_inband_cache; /* for inband reads */
 
 #endif	/* _IO_REQ_ */
diff --git a/device/net_io.c b/device/net_io.c
index 8446395..52a0716 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -61,6 +61,7 @@
 #include <kern/printf.h>
 #include <kern/queue.h>
 #include <kern/sched_prim.h>
+#include <kern/slab.h>
 #include <kern/thread.h>
 #include <machine/machspl.h>
@@ -302,7 +303,7 @@ struct net_rcv_port {
 };
 typedef struct net_rcv_port *net_rcv_port_t;
 
-zone_t net_rcv_zone;	/* zone of net_rcv_port structs */
+struct kmem_cache net_rcv_cache;	/* cache of net_rcv_port structs */
 
 #define NET_HASH_SIZE 256
@@ -324,7 +325,7 @@ struct net_hash_entry {
 };
 typedef struct net_hash_entry *net_hash_entry_t;
 
-zone_t net_hash_entry_zone;
+struct kmem_cache net_hash_entry_cache;
 
 /*
 * This structure represents a packet filter with multiple sessions.
@@ -1195,7 +1196,7 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
	 * If there is no match instruction, we allocate
	 * a normal packet filter structure.
	 */
-	my_infp = (net_rcv_port_t) zalloc(net_rcv_zone);
+	my_infp = (net_rcv_port_t) kmem_cache_alloc(&net_rcv_cache);
	my_infp->rcv_port = rcv_port;
	is_new_infp = TRUE;
    } else {
@@ -1205,7 +1206,7 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
	 * a hash table to deal with them.
	 */
	my_infp = 0;
-	hash_entp = (net_hash_entry_t) zalloc(net_hash_entry_zone);
+	hash_entp = (net_hash_entry_t) kmem_cache_alloc(&net_hash_entry_cache);
	is_new_infp = FALSE;
    }
@@ -1310,7 +1311,8 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
		ipc_port_release_send(rcv_port);
 
		if (match != 0)
-			zfree (net_hash_entry_zone, (vm_offset_t)hash_entp);
+			kmem_cache_free(&net_hash_entry_cache,
+					(vm_offset_t)hash_entp);
 
		rval = D_NO_MEMORY;
		goto clean_and_return;
@@ -1526,20 +1528,12 @@ net_io_init()
	register vm_size_t	size;
 
	size = sizeof(struct net_rcv_port);
-	net_rcv_zone = zinit(size,
-			     0,
-			     size * 1000,
-			     PAGE_SIZE,
-			     FALSE,
-			     "net_rcv_port");
+	kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
+			NULL, NULL, NULL, 0);
 
	size = sizeof(struct net_hash_entry);
-	net_hash_entry_zone = zinit(size,
-				    0,
-				    size * 100,
-				    PAGE_SIZE,
-				    FALSE,
-				    "net_hash_entry");
+	kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
+			NULL, NULL, NULL, 0);
 
	size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
	net_kmsg_size = round_page(size);
@@ -2167,7 +2161,7 @@ net_free_dead_infp (dead_infp)
		nextfp = (net_rcv_port_t) queue_next(&infp->input);
		ipc_port_release_send(infp->rcv_port);
		net_del_q_info(infp->rcv_qlimit);
-		zfree(net_rcv_zone, (vm_offset_t) infp);
+		kmem_cache_free(&net_rcv_cache, (vm_offset_t) infp);
	}
 }
@@ -2190,7 +2184,7 @@ net_free_dead_entp (dead_entp)
		ipc_port_release_send(entp->rcv_port);
		net_del_q_info(entp->rcv_qlimit);
 
-		zfree(net_hash_entry_zone, (vm_offset_t) entp);
+		kmem_cache_free(&net_hash_entry_cache, (vm_offset_t) entp);
	}
 }
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
index 2626a38..75bf655 100644
--- a/i386/i386/fpu.c
+++ b/i386/i386/fpu.c
@@ -47,7 +47,7 @@
 #include <kern/mach_param.h>
 #include <kern/printf.h>
 #include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 
 #include <i386/thread.h>
 #include <i386/fpu.h>
@@ -72,7 +72,7 @@ extern void i386_exception();
 
 int		fp_kind = FP_387;	/* 80387 present */
-zone_t		ifps_zone;		/* zone for FPU save area */
+struct kmem_cache	ifps_cache;	/* cache for FPU save area */
 static unsigned long	mxcsr_feature_mask = 0xffffffff;	/* Always AND user-provided mxcsr with this security mask */
 
 void fp_save(thread_t thread);
@@ -193,10 +193,9 @@ init_fpu()
 void
 fpu_module_init()
 {
-	ifps_zone = zinit(sizeof(struct i386_fpsave_state), 16,
-			  THREAD_MAX * sizeof(struct i386_fpsave_state),
-			  THREAD_CHUNK * sizeof(struct i386_fpsave_state),
-			  0, "i386 fpsave state");
+	kmem_cache_init(&ifps_cache, "i386_fpsave_state",
+			sizeof(struct i386_fpsave_state), 16,
+			NULL, NULL, NULL, 0);
 }
 
 /*
@@ -221,7 +220,7 @@
 ASSERT_IPL(SPL0);
	    clear_fpu();
	}
 #endif	/* NCPUS == 1 */
-	zfree(ifps_zone, (vm_offset_t) fps);
+	kmem_cache_free(&ifps_cache, (vm_offset_t) fps);
 }
 
 /* The two following functions were stolen from Linux's i387.c */
@@ -335,7 +334,7 @@
 ASSERT_IPL(SPL0);
	    simple_unlock(&pcb->lock);
	    if (ifps != 0) {
-		zfree(ifps_zone, (vm_offset_t) ifps);
+		kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
	    }
	}
	else {
@@ -356,7 +355,7 @@
 ASSERT_IPL(SPL0);
	    if (ifps == 0) {
		if (new_ifps == 0) {
		    simple_unlock(&pcb->lock);
-		    new_ifps = (struct i386_fpsave_state *) zalloc(ifps_zone);
+		    new_ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
		    goto Retry;
		}
		ifps = new_ifps;
@@ -396,7 +395,7 @@
 ASSERT_IPL(SPL0);
	    simple_unlock(&pcb->lock);
 
	    if (new_ifps != 0)
-		zfree(ifps_zone, (vm_offset_t) new_ifps);
+		kmem_cache_free(&ifps_cache, (vm_offset_t) new_ifps);
	}
 
	return KERN_SUCCESS;
@@ -609,7 +608,7 @@ fpextovrflt()
	clear_fpu();
 
	if (ifps)
-	    zfree(ifps_zone, (vm_offset_t) ifps);
+	    kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
 
	/*
	 * Raise exception.
@@ -785,7 +784,7 @@ fp_load(thread)
 ASSERT_IPL(SPL0);
	ifps = pcb->ims.ifps;
	if (ifps == 0) {
-	    ifps = (struct i386_fpsave_state *) zalloc(ifps_zone);
+	    ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
	    memset(ifps, 0, sizeof *ifps);
	    pcb->ims.ifps = ifps;
	    fpinit();
@@ -836,7 +835,7 @@ fp_state_alloc()
	pcb_t pcb = current_thread()->pcb;
	struct i386_fpsave_state *ifps;
 
-	ifps = (struct i386_fpsave_state *)zalloc(ifps_zone);
+	ifps = (struct i386_fpsave_state *)kmem_cache_alloc(&ifps_cache);
	memset(ifps, 0, sizeof *ifps);
	pcb->ims.ifps = ifps;
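The ifps_cache above is the only conversion in this commit that passes a nonzero alignment (16, carried over from the old zinit call). That matters because the FXSAVE/FXRSTOR save area must be 16-byte aligned on x86, so every object the cache hands out has to satisfy that invariant. A short illustrative fragment against the kernel headers (the assert is mine, not from the source):

    struct i386_fpsave_state *ifps;

    ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
    /* the align argument given to kmem_cache_init() makes this hold */
    assert(((vm_offset_t) ifps & 15) == 0);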
diff --git a/i386/i386/io_perm.c b/i386/i386/io_perm.c
index df25cc6..7dcb858 100644
--- a/i386/i386/io_perm.c
+++ b/i386/i386/io_perm.c
@@ -54,7 +54,7 @@
 #include <ipc/ipc_port.h>
 #include <ipc/ipc_space.h>
 
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/lock.h>
 #include <kern/queue.h>
 #include <kern/thread.h>
@@ -257,12 +257,12 @@ i386_io_perm_modify (task_t target_task, io_perm_t io_perm, boolean_t enable)
  if (!iopb)
    {
      simple_unlock (&target_task->machine.iopb_lock);
-      iopb = (unsigned char *) zalloc (machine_task_iopb_zone);
+      iopb = (unsigned char *) kmem_cache_alloc (&machine_task_iopb_cache);
      simple_lock (&target_task->machine.iopb_lock);
      if (target_task->machine.iopb)
	{
	  if (iopb)
-	    zfree (machine_task_iopb_zone, (vm_offset_t) iopb);
+	    kmem_cache_free (&machine_task_iopb_cache, (vm_offset_t) iopb);
	  iopb = target_task->machine.iopb;
	  iopb_size = target_task->machine.iopb_size;
	}
diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c
index 35b89e0..689bf04 100644
--- a/i386/i386/machine_task.c
+++ b/i386/i386/machine_task.c
@@ -22,15 +22,15 @@
 #include <kern/lock.h>
 #include <mach/mach_types.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/mach_param.h>
 #include <machine/task.h>
 #include <machine/io_perm.h>
 
-/* The zone which holds our IO permission bitmaps.  */
-zone_t machine_task_iopb_zone;
+/* The cache which holds our IO permission bitmaps.  */
+struct kmem_cache machine_task_iopb_cache;
 
 /* Initialize the machine task module.  The function is called once at
@@ -38,11 +38,8 @@ zone_t machine_task_iopb_zone;
 void
 machine_task_module_init (void)
 {
-  machine_task_iopb_zone = zinit (IOPB_BYTES, 0,
-				  TASK_MAX * IOPB_BYTES,
-				  IOPB_BYTES,
-				  ZONE_COLLECTABLE | ZONE_EXHAUSTIBLE,
-				  "i386 machine task iopb");
+  kmem_cache_init (&machine_task_iopb_cache, "i386_task_iopb", IOPB_BYTES, 0,
+		   NULL, NULL, NULL, 0);
 }
@@ -62,7 +59,8 @@
 void
 machine_task_terminate (task_t task)
 {
  if (task->machine.iopb)
-    zfree (machine_task_iopb_zone, (vm_offset_t) task->machine.iopb);
+    kmem_cache_free (&machine_task_iopb_cache,
+		     (vm_offset_t) task->machine.iopb);
 }
@@ -74,7 +72,8 @@ machine_task_collect (task_t task)
  simple_lock (&task->machine.iopb_lock);
  if (task->machine.iopb_size == 0 && task->machine.iopb)
    {
-      zfree (machine_task_iopb_zone, (vm_offset_t) task->machine.iopb);
+      kmem_cache_free (&machine_task_iopb_cache,
+		       (vm_offset_t) task->machine.iopb);
      task->machine.iopb = 0;
    }
  simple_unlock (&task->machine.iopb_lock);
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
index fffa92a..11ef5e7 100644
--- a/i386/i386/pcb.c
+++ b/i386/i386/pcb.c
@@ -39,6 +39,7 @@
 #include <kern/mach_param.h>
 #include <kern/thread.h>
 #include <kern/sched_prim.h>
+#include <kern/slab.h>
 
 #include <vm/vm_kern.h>
 #include <vm/pmap.h>
@@ -65,7 +66,7 @@
 extern void Thread_continue();
 extern void user_ldt_free();
 
-zone_t		pcb_zone;
+struct kmem_cache	pcb_cache;
 
 vm_offset_t	kernel_stack[NCPUS];	/* top of active_stack */
@@ -369,10 +370,8 @@ thread_t switch_context(old, continuation, new)
 void pcb_module_init()
 {
-	pcb_zone = zinit(sizeof(struct pcb), 0,
-			 THREAD_MAX * sizeof(struct pcb),
-			 THREAD_CHUNK * sizeof(struct pcb),
-			 0, "i386 pcb state");
+	kmem_cache_init(&pcb_cache, "pcb", sizeof(struct pcb), 0,
+			NULL, NULL, NULL, 0);
 
	fpu_module_init();
 }
@@ -382,7 +381,7 @@ void pcb_init(thread)
 {
	register pcb_t	pcb;
 
-	pcb = (pcb_t) zalloc(pcb_zone);
+	pcb = (pcb_t) kmem_cache_alloc(&pcb_cache);
	if (pcb == 0)
		panic("pcb_init");
@@ -422,7 +421,7 @@ void pcb_terminate(thread)
	    fp_free(pcb->ims.ifps);
	if (pcb->ims.ldt != 0)
	    user_ldt_free(pcb->ims.ldt);
-	zfree(pcb_zone, (vm_offset_t) pcb);
+	kmem_cache_free(&pcb_cache, (vm_offset_t) pcb);
	thread->pcb = 0;
 }
diff --git a/i386/i386/task.h b/i386/i386/task.h
index ca8de04..0060ad4 100644
--- a/i386/i386/task.h
+++ b/i386/i386/task.h
@@ -24,7 +24,7 @@
 #define _I386_TASK_H_
 
 #include <kern/kern_types.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 
 /* The machine specific data of a task. */
 struct machine_task
@@ -41,7 +41,7 @@
 typedef struct machine_task machine_task_t;
 
-extern zone_t machine_task_iopb_zone;
+extern struct kmem_cache machine_task_iopb_cache;
 
 /* Initialize the machine task module.  The function is called once at
   start up by task_init in kern/task.c. */
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 9f34f2d..94d2e9c 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -63,7 +63,7 @@
 #include <kern/debug.h>
 #include <kern/printf.h>
 #include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 
 #include <kern/lock.h>
@@ -113,7 +113,7 @@
 pv_entry_t	pv_head_table;		/* array of entries, one per page */
 
 /*
 *	pv_list entries are kept on a list that can only be accessed
 *	with the pmap system locked (at SPLVM, not in the cpus_active set).
- *	The list is refilled from the pv_list_zone if it becomes empty.
+ *	The list is refilled from the pv_list_cache if it becomes empty.
 */
 pv_entry_t	pv_free_list;		/* free list at SPLVM */
 decl_simple_lock_data(, pv_free_list_lock)
@@ -133,7 +133,7 @@ decl_simple_lock_data(, pv_free_list_lock)
	simple_unlock(&pv_free_list_lock); \
 }
 
-zone_t pv_list_zone;		/* zone of pv_entry structures */
+struct kmem_cache pv_list_cache;	/* cache of pv_entry structures */
 
 /*
 *	Each entry in the pv_head_table is locked by a bit in the
@@ -400,7 +400,7 @@ struct pmap_update_list cpu_update_list[NCPUS];
 struct pmap	kernel_pmap_store;
 pmap_t		kernel_pmap;
 
-struct zone	*pmap_zone;		/* zone of pmap structures */
+struct kmem_cache pmap_cache;		/* cache of pmap structures */
 
 int		pmap_debug = 0;		/* flag for debugging prints */
@@ -937,13 +937,13 @@ void pmap_init()
	pmap_phys_attributes = (char *) addr;
 
	/*
-	 *	Create the zone of physical maps,
+	 *	Create the cache of physical maps,
	 *	and of the physical-to-virtual entries.
	 */
	s = (vm_size_t) sizeof(struct pmap);
-	pmap_zone = zinit(s, 0, 400*s, 4096, 0, "pmap"); /* XXX */
+	kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, NULL, NULL, 0);
	s = (vm_size_t) sizeof(struct pv_entry);
-	pv_list_zone = zinit(s, 0, 10000*s, 4096, 0, "pv_list"); /* XXX */
+	kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, NULL, NULL, 0);
 
 #if	NCPUS > 1
	/*
@@ -1009,7 +1009,7 @@ pmap_page_table_page_alloc()
	/*
	 *	We cannot allocate the pmap_object in pmap_init,
-	 *	because it is called before the zone package is up.
+	 *	because it is called before the cache package is up.
	 *	Allocate it now if it is missing.
	 */
	if (pmap_object == VM_OBJECT_NULL)
@@ -1113,11 +1113,11 @@ pmap_t pmap_create(size)
	}
 
	/*
-	 *	Allocate a pmap struct from the pmap_zone.  Then allocate
-	 *	the page descriptor table from the pd_zone.
+	 *	Allocate a pmap struct from the pmap_cache.  Then allocate
+	 *	the page descriptor table.
	 */
 
-	p = (pmap_t) zalloc(pmap_zone);
+	p = (pmap_t) kmem_cache_alloc(&pmap_cache);
	if (p == PMAP_NULL)
		panic("pmap_create");
@@ -1232,7 +1232,7 @@ void pmap_destroy(p)
 #endif	/* MACH_XEN */
	kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES);
 #endif	/* PAE */
-	zfree(pmap_zone, (vm_offset_t) p);
+	kmem_cache_free(&pmap_cache, (vm_offset_t) p);
 }
 
 /*
@@ -1782,7 +1782,7 @@
 if (pmap_debug) printf("pmap(%x, %x)\n", v, pa);
	/*
	 *	Must allocate a new pvlist entry while we're unlocked;
-	 *	zalloc may cause pageout (which will lock the pmap system).
+	 *	Allocating may cause pageout (which will lock the pmap system).
	 *	If we determine we need a pvlist entry, we will unlock
	 *	and allocate one.  Then we will retry, throwing away
	 *	the allocated entry later (if we no longer need it).
@@ -1966,9 +1966,9 @@ Retry:
	    PMAP_READ_UNLOCK(pmap, spl);
 
	    /*
-	     *	Refill from zone.
+	     *	Refill from cache.
	     */
-	    pv_e = (pv_entry_t) zalloc(pv_list_zone);
+	    pv_e = (pv_entry_t) kmem_cache_alloc(&pv_list_cache);
	    goto Retry;
	}
 }
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 7ba7d2c..e02ad36 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -37,7 +37,6 @@
 #ifndef	__ASSEMBLER__
 
-#include <kern/zalloc.h>
 #include <kern/lock.h>
 #include <mach/machine/vm_param.h>
 #include <mach/vm_statistics.h>
diff --git a/include/mach_debug/mach_debug.defs b/include/mach_debug/mach_debug.defs
index 2a58dc4..053c3fe 100644
--- a/include/mach_debug/mach_debug.defs
+++ b/include/mach_debug/mach_debug.defs
@@ -42,17 +42,7 @@
 skip;	/* host_ipc_statistics_reset */
 skip;	/* host_callout_info */
 skip;	/* host_callout_statistics */
 skip;	/* host_callout_statistics_reset */
-
-/*
- *	Returns information about the memory allocation zones.
- */
-routine host_zone_info(
-		host		: host_t;
-	out	names		: zone_name_array_t,
-					CountInOut, Dealloc;
-	out	info		: zone_info_array_t,
-					CountInOut, Dealloc);
-
+skip;	/* host_zone_info */
 skip;	/* host_ipc_bucket_info */
 
 #if !defined(MACH_IPC_DEBUG) || MACH_IPC_DEBUG
@@ -228,6 +218,14 @@ routine mach_vm_object_pages(
	out	pages		: vm_page_info_array_t,
					CountInOut, Dealloc);
 
+/*
+ *	Returns information about the memory allocation caches.
+ */
+routine host_slab_info(
+		host		: host_t;
+	out	info		: cache_info_array_t,
+					CountInOut, Dealloc);
+
 #else	/* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
 skip;	/* mach_vm_region_info */
 skip;	/* mach_vm_object_info */
diff --git a/include/mach_debug/mach_debug_types.defs b/include/mach_debug/mach_debug_types.defs
index 9f1976f..f60125a 100644
--- a/include/mach_debug/mach_debug_types.defs
+++ b/include/mach_debug/mach_debug_types.defs
@@ -32,11 +32,8 @@
 #include <mach/std_types.defs>
 
-type zone_name_t = struct[80] of char;
-type zone_name_array_t = array[] of zone_name_t;
-
-type zone_info_t = struct[9] of integer_t;
-type zone_info_array_t = array[] of zone_info_t;
+type cache_info_t = struct[19] of integer_t;
+type cache_info_array_t = array[] of cache_info_t;
 
 type hash_info_bucket_t = struct[1] of natural_t;
 type hash_info_bucket_array_t = array[] of hash_info_bucket_t;
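The debug interface swap above replaces host_zone_info() with host_slab_info(), which returns one cache_info_t (a struct of 19 integer_t fields, per the type definition just above) per kmem cache. A hedged sketch of what a user-space caller might look like once MIG has generated the stubs; the generated signature is assumed from the CountInOut/Dealloc annotations, the header names are assumptions, and error handling is elided:

    #include <mach.h>
    #include <mach_debug/mach_debug.h>   /* assumed user-side MIG header */

    void dump_slab_info(host_t host)
    {
            cache_info_array_t info = NULL;
            mach_msg_type_number_t count = 0;

            if (host_slab_info(host, &info, &count) != KERN_SUCCESS)
                    return;

            /* ... inspect the count entries of info[] here ... */

            /* Dealloc: the reply buffer arrives as out-of-line memory */
            vm_deallocate(mach_task_self(), (vm_offset_t) info,
                          count * sizeof(*info));
    }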
diff --git a/include/mach_debug/mach_debug_types.h b/include/mach_debug/mach_debug_types.h
index 2ba0cb1..5d4efcd 100644
--- a/include/mach_debug/mach_debug_types.h
+++ b/include/mach_debug/mach_debug_types.h
@@ -32,7 +32,7 @@
 #include <mach_debug/ipc_info.h>
 #include <mach_debug/vm_info.h>
-#include <mach_debug/zone_info.h>
+#include <mach_debug/slab_info.h>
 #include <mach_debug/hash_info.h>
 
 typedef	char	symtab_name_t[32];
diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
index 42e8dd8..3a06244 100644
--- a/ipc/ipc_entry.c
+++ b/ipc/ipc_entry.c
@@ -41,7 +41,7 @@
 #include <mach/port.h>
 #include <kern/assert.h>
 #include <kern/sched_prim.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <ipc/port.h>
 #include <ipc/ipc_types.h>
 #include <ipc/ipc_entry.h>
@@ -51,7 +51,7 @@
 #include <ipc/ipc_table.h>
 #include <ipc/ipc_object.h>
 
-zone_t ipc_tree_entry_zone;
+struct kmem_cache ipc_tree_entry_cache;
 
 /*
 * Routine:	ipc_entry_tree_collision
diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
index a577cf0..6afa4f6 100644
--- a/ipc/ipc_entry.h
+++ b/ipc/ipc_entry.h
@@ -41,7 +41,7 @@
 #include <mach/mach_types.h>
 #include <mach/port.h>
 #include <mach/kern_return.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <ipc/port.h>
 #include <ipc/ipc_table.h>
 #include <ipc/ipc_types.h>
@@ -129,10 +129,10 @@ typedef struct ipc_tree_entry {
 #define	ite_request	ite_entry.ie_request
 #define	ite_next	ite_entry.hash.tree
 
-extern zone_t ipc_tree_entry_zone;
+extern struct kmem_cache ipc_tree_entry_cache;
 
-#define ite_alloc()	((ipc_tree_entry_t) zalloc(ipc_tree_entry_zone))
-#define	ite_free(ite)	zfree(ipc_tree_entry_zone, (vm_offset_t) (ite))
+#define ite_alloc()	((ipc_tree_entry_t) kmem_cache_alloc(&ipc_tree_entry_cache))
+#define	ite_free(ite)	kmem_cache_free(&ipc_tree_entry_cache, (vm_offset_t) (ite))
 
 extern ipc_entry_t
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
index e9ca64d..9b7e126 100644
--- a/ipc/ipc_init.c
+++ b/ipc/ipc_init.c
@@ -37,6 +37,7 @@
 #include <mach/kern_return.h>
 #include <kern/mach_param.h>
 #include <kern/ipc_host.h>
+#include <kern/slab.h>
 #include <vm/vm_map.h>
 #include <vm/vm_kern.h>
 #include <ipc/ipc_entry.h>
@@ -77,28 +78,17 @@ ipc_bootstrap(void)
	ipc_port_timestamp_lock_init();
	ipc_port_timestamp_data = 0;
 
-	ipc_space_zone = zinit(sizeof(struct ipc_space), 0,
-			       ipc_space_max * sizeof(struct ipc_space),
-			       sizeof(struct ipc_space),
-			       0, "ipc spaces");
-
-	ipc_tree_entry_zone =
-		zinit(sizeof(struct ipc_tree_entry), 0,
-		      ipc_tree_entry_max * sizeof(struct ipc_tree_entry),
-		      sizeof(struct ipc_tree_entry),
-		      IPC_ZONE_TYPE, "ipc tree entries");
-
-	ipc_object_zones[IOT_PORT] =
-		zinit(sizeof(struct ipc_port), 0,
-		      ipc_port_max * sizeof(struct ipc_port),
-		      sizeof(struct ipc_port),
-		      0, "ipc ports");
-
-	ipc_object_zones[IOT_PORT_SET] =
-		zinit(sizeof(struct ipc_pset), 0,
-		      ipc_pset_max * sizeof(struct ipc_pset),
-		      sizeof(struct ipc_pset),
-		      IPC_ZONE_TYPE, "ipc port sets");
+	kmem_cache_init(&ipc_space_cache, "ipc_space",
+			sizeof(struct ipc_space), 0, NULL, NULL, NULL, 0);
+
+	kmem_cache_init(&ipc_tree_entry_cache, "ipc_tree_entry",
+			sizeof(struct ipc_tree_entry), 0, NULL, NULL, NULL, 0);
+
+	kmem_cache_init(&ipc_object_caches[IOT_PORT], "ipc_port",
+			sizeof(struct ipc_port), 0, NULL, NULL, NULL, 0);
+
+	kmem_cache_init(&ipc_object_caches[IOT_PORT_SET], "ipc_pset",
+			sizeof(struct ipc_pset), 0, NULL, NULL, NULL, 0);
 
	/* create special spaces */
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
index 540382a..2087c67 100644
--- a/ipc/ipc_marequest.c
+++ b/ipc/ipc_marequest.c
@@ -39,7 +39,7 @@
 #include <kern/lock.h>
 #include <kern/mach_param.h>
 #include <kern/kalloc.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <ipc/port.h>
 #include <ipc/ipc_init.h>
 #include <ipc/ipc_space.h>
@@ -58,11 +58,11 @@
 #endif
 
-zone_t ipc_marequest_zone;
+struct kmem_cache ipc_marequest_cache;
 int ipc_marequest_max = IMAR_MAX;
 
-#define imar_alloc()		((ipc_marequest_t) zalloc(ipc_marequest_zone))
-#define	imar_free(imar)		zfree(ipc_marequest_zone, (vm_offset_t) (imar))
+#define imar_alloc()		((ipc_marequest_t) kmem_cache_alloc(&ipc_marequest_cache))
+#define	imar_free(imar)		kmem_cache_free(&ipc_marequest_cache, (vm_offset_t) (imar))
 
 typedef unsigned int ipc_marequest_index_t;
@@ -142,11 +142,8 @@ ipc_marequest_init(void)
		bucket->imarb_head = IMAR_NULL;
	}
 
-	ipc_marequest_zone =
-		zinit(sizeof(struct ipc_marequest), 0,
-		      ipc_marequest_max * sizeof(struct ipc_marequest),
-		      sizeof(struct ipc_marequest),
-		      IPC_ZONE_TYPE, "ipc msg-accepted requests");
+	kmem_cache_init(&ipc_marequest_cache, "ipc_marequest",
+			sizeof(struct ipc_marequest), 0, NULL, NULL, NULL, 0);
 }
 
 /*
diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
index a7a7ddb..4850fb1 100644
--- a/ipc/ipc_object.c
+++ b/ipc/ipc_object.c
@@ -47,8 +47,9 @@
 #include <ipc/ipc_pset.h>
 #include <kern/debug.h>
 #include <kern/printf.h>
+#include <kern/slab.h>
 
-zone_t ipc_object_zones[IOT_NUMBER];
+struct kmem_cache ipc_object_caches[IOT_NUMBER];
diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
index 2bbf8bd..adf5bca 100644
--- a/ipc/ipc_object.h
+++ b/ipc/ipc_object.h
@@ -39,7 +39,7 @@
 #include <ipc/ipc_types.h>
 #include <kern/lock.h>
 #include <kern/macro_help.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 
 typedef unsigned int ipc_object_refs_t;
 typedef unsigned int ipc_object_bits_t;
@@ -57,7 +57,7 @@ typedef struct ipc_object {
 #define	IO_VALID(io)	(((io) != IO_NULL) && ((io) != IO_DEAD))
 
 #define	IO_BITS_KOTYPE		0x0000ffff	/* used by the object */
-#define	IO_BITS_OTYPE		0x7fff0000	/* determines a zone */
+#define	IO_BITS_OTYPE		0x7fff0000	/* determines a cache */
 #define	IO_BITS_ACTIVE		0x80000000U	/* is object alive? */
 
 #define	io_active(io)		((int)(io)->io_bits < 0)	/* hack */
@@ -75,13 +75,13 @@ typedef struct ipc_object {
 #define	IOT_PORT_SET		1
 #define	IOT_NUMBER		2	/* number of types used */
 
-extern zone_t ipc_object_zones[IOT_NUMBER];
+extern struct kmem_cache ipc_object_caches[IOT_NUMBER];
 
 #define	io_alloc(otype)		\
-		((ipc_object_t) zalloc(ipc_object_zones[(otype)]))
+		((ipc_object_t) kmem_cache_alloc(&ipc_object_caches[(otype)]))
 
 #define	io_free(otype, io)	\
-		zfree(ipc_object_zones[(otype)], (vm_offset_t) (io))
+		kmem_cache_free(&ipc_object_caches[(otype)], (vm_offset_t) (io))
 
 #define	io_lock_init(io)	simple_lock_init(&(io)->io_lock_data)
 #define	io_lock(io)		simple_lock(&(io)->io_lock_data)
diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
index 0f50f15..ab55e83 100644
--- a/ipc/ipc_space.c
+++ b/ipc/ipc_space.c
@@ -43,7 +43,7 @@
 #include <mach/port.h>
 #include <kern/assert.h>
 #include <kern/sched_prim.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <ipc/port.h>
 #include <ipc/ipc_entry.h>
 #include <ipc/ipc_splay.h>
@@ -55,7 +55,7 @@
 
-zone_t ipc_space_zone;
+struct kmem_cache ipc_space_cache;
 ipc_space_t ipc_space_kernel;
 ipc_space_t ipc_space_reply;
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
index d030bf7..c4683d2 100644
--- a/ipc/ipc_space.h
+++ b/ipc/ipc_space.h
@@ -44,6 +44,7 @@
 #include <mach/mach_types.h>
 #include <kern/macro_help.h>
 #include <kern/lock.h>
+#include <kern/slab.h>
 #include <ipc/ipc_splay.h>
 #include <ipc/ipc_types.h>
@@ -82,10 +83,10 @@ struct ipc_space {
 #define	IS_NULL			((ipc_space_t) 0)
 
-extern zone_t ipc_space_zone;
+extern struct kmem_cache ipc_space_cache;
 
-#define	is_alloc()		((ipc_space_t) zalloc(ipc_space_zone))
-#define	is_free(is)		zfree(ipc_space_zone, (vm_offset_t) (is))
+#define	is_alloc()		((ipc_space_t) kmem_cache_alloc(&ipc_space_cache))
+#define	is_free(is)		kmem_cache_free(&ipc_space_cache, (vm_offset_t) (is))
 
 extern struct ipc_space *ipc_space_kernel;
 extern struct ipc_space *ipc_space_reply;
diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
index e572358..d5b7904 100644
--- a/ipc/ipc_table.c
+++ b/ipc/ipc_table.c
@@ -50,13 +50,6 @@ void ipc_table_fill(
	unsigned int	min,
	vm_size_t	elemsize);
 
-/*
- * We borrow the kalloc map, rather than creating
- * yet another submap of the kernel map.
- */
-
-extern vm_map_t kalloc_map;
-
 ipc_table_size_t ipc_table_entries;
 unsigned int ipc_table_entries_size = 512;
diff --git a/kern/act.c b/kern/act.c
--- a/kern/act.c
+++ b/kern/act.c
@@ -31,7 +31,7 @@
 #include <mach/kern_return.h>
 #include <mach/alert.h>
 #include <kern/mach_param.h>	/* XXX INCALL_... */
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/thread.h>
 #include <kern/task.h>
 #include <kern/debug.h>
@@ -47,7 +47,7 @@ static void special_handler(ReturnHandler *rh, struct Act *act);
 #endif
 
 #ifndef ACT_STATIC_KLUDGE
-static zone_t act_zone;
+static struct kmem_cache act_cache;
 #else
 static Act *act_freelist;
 static Act free_acts[ACT_STATIC_KLUDGE];
@@ -68,11 +68,8 @@
 void
 global_act_init()
 {
 #ifndef ACT_STATIC_KLUDGE
-	act_zone = zinit(
-			sizeof(struct Act), 0,
-			ACT_MAX * sizeof(struct Act), /* XXX */
-			ACT_CHUNK * sizeof(struct Act),
-			0, "activations");
+	kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
+			NULL, NULL, NULL, 0);
 #else
	int i;
@@ -104,7 +101,7 @@ kern_return_t act_create(task_t task, vm_offset_t user_stack,
	int rc;
 
 #ifndef ACT_STATIC_KLUDGE
-	act = (Act*)zalloc(act_zone);
+	act = (Act*)kmem_cache_alloc(&act_cache);
	if (act == 0)
		return(KERN_RESOURCE_SHORTAGE);
 #else
@@ -170,9 +167,9 @@ static void act_free(Act *inc)
	/* Drop the task reference.
	 */
	task_deallocate(inc->task);
 
-	/* Put the act back on the act zone */
+	/* Put the act back on the act cache */
 #ifndef ACT_STATIC_KLUDGE
-	zfree(act_zone, (vm_offset_t)inc);
+	kmem_cache_free(&act_cache, (vm_offset_t)inc);
 #else
	/* XXX ipt_lock(act_freelist); */
	inc->ipt_next = act_freelist;
diff --git a/kern/kalloc.h b/kern/kalloc.h
index a80f6db..1330b54 100644
--- a/kern/kalloc.h
+++ b/kern/kalloc.h
@@ -28,11 +28,11 @@
 #define _KERN_KALLOC_H_
 
 #include <mach/machine/vm_types.h>
+#include <vm/vm_types.h>
 
-#define MINSIZE 16
+extern vm_map_t kalloc_map;
 
 extern vm_offset_t kalloc (vm_size_t size);
-extern vm_offset_t kget (vm_size_t size);
 extern void kfree (vm_offset_t data, vm_size_t size);
 
 extern void kalloc_init (void);
diff --git a/kern/mach_clock.c b/kern/mach_clock.c
index 4ba7c08..050f088 100644
--- a/kern/mach_clock.c
+++ b/kern/mach_clock.c
@@ -514,7 +514,7 @@ int timeclose()
 /*
 *	Compatibility for device drivers.
 *	New code should use set_timeout/reset_timeout and private timers.
- *	These code can't use a zone to allocate timers, because
+ *	These code can't use a cache to allocate timers, because
 *	it can be called from interrupt handlers.
 */
diff --git a/kern/processor.c b/kern/processor.c
index 718ff3a..3ece341 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -46,8 +46,8 @@
 #include <ipc/ipc_port.h>
 
 #if	MACH_HOST
-#include <kern/zalloc.h>
-zone_t pset_zone;
+#include <kern/slab.h>
+struct kmem_cache pset_cache;
 #endif	/* MACH_HOST */
 
@@ -112,10 +112,10 @@ void pset_sys_init(void)
	register processor_t	processor;
 
	/*
-	 * Allocate the zone for processor sets.
+	 * Allocate the cache for processor sets.
	 */
-	pset_zone = zinit(sizeof(struct processor_set), 0, 128*PAGE_SIZE,
-		PAGE_SIZE, 0, "processor sets");
+	kmem_cache_init(&pset_cache, "processor_set",
+			sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
 
	/*
	 * Give each processor a control port.
@@ -394,7 +394,7 @@ void pset_deallocate(
	/*
	 *	That's it, free data structure.
	 */
-	zfree(pset_zone, (vm_offset_t)pset);
+	kmem_cache_free(&pset_cache, (vm_offset_t)pset);
 #endif	/* MACH_HOST */
 }
@@ -538,7 +538,7 @@ processor_set_create(
	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;
 
-	pset = (processor_set_t) zalloc(pset_zone);
+	pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
	pset_init(pset);
	pset_reference(pset);	/* for new_set out argument */
	pset_reference(pset);	/* for new_name out argument */
diff --git a/kern/startup.c b/kern/startup.c
index a4b5a6f..3bdda16 100644
--- a/kern/startup.c
+++ b/kern/startup.c
@@ -48,7 +48,6 @@
 #include <kern/timer.h>
 #include <kern/xpr.h>
 #include <kern/time_stamp.h>
-#include <kern/zalloc.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
diff --git a/kern/task.c b/kern/task.c
index 88da16e..2e9fd76 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -43,7 +43,7 @@
 #include <kern/mach_param.h>
 #include <kern/task.h>
 #include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/kalloc.h>
 #include <kern/processor.h>
 #include <kern/sched_prim.h>	/* for thread_wakeup */
@@ -52,7 +52,7 @@
 #include <machine/machspl.h>	/* for splsched */
 
 task_t	kernel_task = TASK_NULL;
-zone_t	task_zone;
+struct kmem_cache task_cache;
 
 extern void eml_init(void);
 extern void eml_task_reference(task_t, task_t);
@@ -60,11 +60,8 @@ extern void eml_task_deallocate(task_t);
 
 void task_init(void)
 {
-	task_zone = zinit(
-			sizeof(struct task), 0,
-			TASK_MAX * sizeof(struct task),
-			TASK_CHUNK * sizeof(struct task),
-			0, "tasks");
+	kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
+			NULL, NULL, NULL, 0);
 
	eml_init();
	machine_task_module_init ();
@@ -120,7 +117,7 @@ kern_return_t task_create(
	int		i;
 #endif
 
-	new_task = (task_t) zalloc(task_zone);
+	new_task = (task_t) kmem_cache_alloc(&task_cache);
	if (new_task == TASK_NULL) {
		panic("task_create: no memory for task structure");
	}
@@ -235,7 +232,7 @@ void task_deallocate(
	pset_deallocate(pset);
	vm_map_deallocate(task->map);
	is_release(task->itk_space);
-	zfree(task_zone, (vm_offset_t) task);
+	kmem_cache_free(&task_cache, (vm_offset_t) task);
 }
 
 void task_reference(
diff --git a/kern/thread.c b/kern/thread.c
index bf2df94..f23af58 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -54,7 +54,7 @@
 #include <kern/thread.h>
 #include <kern/thread_swap.h>
 #include <kern/host.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/mach_clock.h>
 #include <vm/vm_kern.h>
 #include <ipc/ipc_kmsg.h>
@@ -67,7 +67,7 @@
 thread_t active_threads[NCPUS];
 vm_offset_t active_stacks[NCPUS];
 
-struct zone *thread_zone;
+struct kmem_cache thread_cache;
 
 queue_head_t	reaper_queue;
 decl_simple_lock_data(, reaper_lock)
@@ -300,11 +300,8 @@ void stack_privilege(
 
 void thread_init(void)
 {
-	thread_zone = zinit(
-			sizeof(struct thread), 0,
-			THREAD_MAX * sizeof(struct thread),
-			THREAD_CHUNK * sizeof(struct thread),
-			0, "threads");
+	kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
+			NULL, NULL, NULL, 0);
 
	/*
	 *	Fill in a template thread for fast initialization.
	 */
@@ -414,7 +411,7 @@ kern_return_t thread_create(
	 *	Allocate a thread and initialize static fields
	 */
 
-	new_thread = (thread_t) zalloc(thread_zone);
+	new_thread = (thread_t) kmem_cache_alloc(&thread_cache);
	if (new_thread == THREAD_NULL)
		return KERN_RESOURCE_SHORTAGE;
@@ -710,7 +707,7 @@ void thread_deallocate(
	evc_notify_abort(thread);
 
	pcb_terminate(thread);
-	zfree(thread_zone, (vm_offset_t) thread);
+	kmem_cache_free(&thread_cache, (vm_offset_t) thread);
 }
 
 void thread_reference(
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index fdab6e0..9c0528c 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -41,15 +41,15 @@
 #include <mach/notify.h>
 #include <mach/vm_prot.h>
 #include <kern/printf.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/mach_param.h>
 #include <ipc/ipc_port.h>
 #include <ipc/ipc_space.h>
 #include <vm/memory_object_proxy.h>
 
-/* The zone which holds our proxy memory objects.  */
-static zone_t memory_object_proxy_zone;
+/* The cache which holds our proxy memory objects.  */
+static struct kmem_cache memory_object_proxy_cache;
 
 struct memory_object_proxy
 {
@@ -64,13 +64,8 @@ typedef struct memory_object_proxy *memory_object_proxy_t;
 void
 memory_object_proxy_init (void)
 {
-  /* For limit, see PORT_MAX.  */
-  memory_object_proxy_zone = zinit (sizeof (struct memory_object_proxy), 0,
-				    (TASK_MAX * 3 + THREAD_MAX)
-				    * sizeof (struct memory_object_proxy),
-				    256 * sizeof (struct memory_object_proxy),
-				    ZONE_EXHAUSTIBLE,
-				    "proxy memory object zone");
+  kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
+		   sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
 }
 
 /* Lookup a proxy memory object by its port.  */
@@ -153,13 +148,13 @@ memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
  if (start[0] != 0 || len[0] != (vm_offset_t) ~0)
    return KERN_INVALID_ARGUMENT;
 
-  proxy = (memory_object_proxy_t) zalloc (memory_object_proxy_zone);
+  proxy = (memory_object_proxy_t) kmem_cache_alloc (&memory_object_proxy_cache);
 
  /* Allocate port, keeping a reference for it.  */
  proxy->port = ipc_port_alloc_kernel ();
  if (proxy->port == IP_NULL)
    {
-      zfree (memory_object_proxy_zone, (vm_offset_t) proxy);
+      kmem_cache_free (&memory_object_proxy_cache, (vm_offset_t) proxy);
      return KERN_RESOURCE_SHORTAGE;
    }
 
  /* Associate the port with the proxy memory object.
  */
diff --git a/vm/vm_external.c b/vm/vm_external.c
index ac47faa..e9643ff 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -31,7 +31,7 @@
 */
 
 #include <mach/boolean.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/vm_external.h>
 #include <mach/vm_param.h>
 #include <kern/assert.h>
@@ -40,7 +40,7 @@
 
 boolean_t	vm_external_unsafe = FALSE;
 
-zone_t		vm_external_zone = ZONE_NULL;
+struct kmem_cache	vm_external_cache;
 
 /*
 *	The implementation uses bit arrays to record whether
@@ -52,8 +52,8 @@
 #define		SMALL_SIZE	(VM_EXTERNAL_SMALL_SIZE/8)
 #define		LARGE_SIZE	(VM_EXTERNAL_LARGE_SIZE/8)
 
-zone_t		vm_object_small_existence_map_zone;
-zone_t		vm_object_large_existence_map_zone;
+struct kmem_cache	vm_object_small_existence_map_cache;
+struct kmem_cache	vm_object_large_existence_map_cache;
 
 vm_external_t	vm_external_create(size)
@@ -62,20 +62,17 @@
	vm_external_t	result;
	vm_size_t	bytes;
 
-	if (vm_external_zone == ZONE_NULL)
-		return(VM_EXTERNAL_NULL);
-
-	result = (vm_external_t) zalloc(vm_external_zone);
+	result = (vm_external_t) kmem_cache_alloc(&vm_external_cache);
	result->existence_map = (char *) 0;
 
	bytes = (atop(size) + 07) >> 3;
	if (bytes <= SMALL_SIZE) {
		result->existence_map =
-			(char *) zalloc(vm_object_small_existence_map_zone);
+			(char *) kmem_cache_alloc(&vm_object_small_existence_map_cache);
		result->existence_size = SMALL_SIZE;
	} else if (bytes <= LARGE_SIZE) {
		result->existence_map =
-			(char *) zalloc(vm_object_large_existence_map_zone);
+			(char *) kmem_cache_alloc(&vm_object_large_existence_map_cache);
		result->existence_size = LARGE_SIZE;
	}
	return(result);
@@ -89,14 +86,14 @@ void vm_external_destroy(e)
	if (e->existence_map != (char *) 0) {
	    if (e->existence_size <= SMALL_SIZE) {
-		zfree(vm_object_small_existence_map_zone,
+		kmem_cache_free(&vm_object_small_existence_map_cache,
			(vm_offset_t) e->existence_map);
	    } else {
-		zfree(vm_object_large_existence_map_zone,
+		kmem_cache_free(&vm_object_large_existence_map_cache,
			(vm_offset_t) e->existence_map);
	    }
	}
-	zfree(vm_external_zone, (vm_offset_t) e);
+	kmem_cache_free(&vm_external_cache, (vm_offset_t) e);
 }
 
 vm_external_state_t _vm_external_state_get(e, offset)
@@ -142,18 +139,14 @@
 void		vm_external_module_initialize(void)
 {
	vm_size_t	size = (vm_size_t) sizeof(struct vm_external);
 
-	vm_external_zone = zinit(size, 0, 16*1024*size, size,
-				 0, "external page bitmaps");
+	kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
+			NULL, NULL, NULL, 0);
 
-	vm_object_small_existence_map_zone = zinit(SMALL_SIZE, 0,
-					round_page(LARGE_SIZE * SMALL_SIZE),
-					round_page(SMALL_SIZE),
-					ZONE_EXHAUSTIBLE,
-					"object small existence maps");
+	kmem_cache_init(&vm_object_small_existence_map_cache,
+			"small_existence_map", SMALL_SIZE, 0,
+			NULL, NULL, NULL, 0);
 
-	vm_object_large_existence_map_zone = zinit(LARGE_SIZE, 0,
-					round_page(8 * LARGE_SIZE),
-					round_page(LARGE_SIZE),
-					ZONE_EXHAUSTIBLE,
-					"object large existence maps");
+	kmem_cache_init(&vm_object_large_existence_map_cache,
+			"large_existence_map", LARGE_SIZE, 0,
+			NULL, NULL, NULL, 0);
 }
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index cce043a..840f038 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -53,7 +53,7 @@
 /* For memory_object_data_{request,unlock} */
 #include <kern/mach_param.h>
 #include <kern/macro_help.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 
 #if	MACH_PCSAMPLE
 #include <kern/pc_sample.h>
@@ -85,7 +85,7 @@ typedef struct vm_fault_state {
	vm_prot_t vmfp_access;
 } vm_fault_state_t;
-zone_t		vm_fault_state_zone = 0;
+struct kmem_cache	vm_fault_state_cache;
 
 int		vm_object_absent_max = 50;
@@ -107,10 +107,8 @@ extern struct db_watchpoint *db_watchpoint_list;
 */
 void vm_fault_init(void)
 {
-	vm_fault_state_zone = zinit(sizeof(vm_fault_state_t), 0,
-				    THREAD_MAX * sizeof(vm_fault_state_t),
-				    sizeof(vm_fault_state_t),
-				    0, "vm fault state");
+	kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
+			sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
 }
 
 /*
@@ -1206,12 +1204,12 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
 
		/*
		 * if this assignment stmt is written as
-		 * 'active_threads[cpu_number()] = zalloc()',
-		 * cpu_number may be evaluated before zalloc;
-		 * if zalloc blocks, cpu_number will be wrong
+		 * 'active_threads[cpu_number()] = kmem_cache_alloc()',
+		 * cpu_number may be evaluated before kmem_cache_alloc;
+		 * if kmem_cache_alloc blocks, cpu_number will be wrong
		 */
-		state = (char *) zalloc(vm_fault_state_zone);
+		state = (char *) kmem_cache_alloc(&vm_fault_state_cache);
		current_thread()->ith_other = state;
	}
@@ -1490,7 +1488,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
		register vm_fault_state_t *state =
			(vm_fault_state_t *) current_thread()->ith_other;
 
-		zfree(vm_fault_state_zone, (vm_offset_t) state);
+		kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
		(*continuation)(kr);
		/*NOTREACHED*/
	}
diff --git a/vm/vm_init.c b/vm/vm_init.c
index 33fca65..89eb098 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -35,7 +35,7 @@
 */
 
 #include <mach/machine/vm_types.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/kalloc.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_object.h>
@@ -67,12 +67,12 @@ void vm_mem_bootstrap()
	 * Initialize other VM packages
	 */
 
-	zone_bootstrap();
+	slab_bootstrap();
	vm_object_bootstrap();
	vm_map_init();
	kmem_init(start, end);
	pmap_init();
-	zone_init();
+	slab_init();
	kalloc_init();
	vm_fault_init();
	vm_page_module_init();
diff --git a/vm/vm_map.c b/vm/vm_map.c
index ce83403..1cae7db 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -41,7 +41,7 @@
 #include <mach/vm_param.h>
 #include <kern/assert.h>
 #include <kern/debug.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/pmap.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
@@ -70,7 +70,7 @@ void vm_map_copy_page_discard (vm_map_copy_t copy);
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero. vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
- *	wire count; it's used for map splitting and zone changing in
+ *	wire count; it's used for map splitting and cache changing in
 *	vm_map_copyout.
 */
 #define vm_map_entry_copy(NEW,OLD) \
@@ -130,10 +130,10 @@ MACRO_END
 *	vm_object_copy_strategically() in vm_object.c.
 */
 
-zone_t		vm_map_zone;		/* zone for vm_map structures */
-zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
-zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
-zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */
+struct kmem_cache	vm_map_cache;		/* cache for vm_map structures */
+struct kmem_cache	vm_map_entry_cache;	/* cache for vm_map_entry structures */
+struct kmem_cache	vm_map_kentry_cache;	/* cache for kernel entry structures */
+struct kmem_cache	vm_map_copy_cache;	/* cache for vm_map_copy structures */
 
 boolean_t	vm_map_lookup_entry();	/* forward declaration */
@@ -151,14 +151,14 @@ vm_object_t	vm_submap_object;
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
- *	Map and entry structures are allocated from zones -- we must
- *	initialize those zones.
+ *	Map and entry structures are allocated from caches -- we must
+ *	initialize those caches.
 *
- *	There are three zones of interest:
+ *	There are three caches of interest:
 *
- *	vm_map_zone:		used to allocate maps.
- *	vm_map_entry_zone:	used to allocate map entries.
- *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
+ *	vm_map_cache:		used to allocate maps.
+ *	vm_map_entry_cache:	used to allocate map entries.
+ *	vm_map_kentry_cache:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
@@ -173,23 +173,16 @@ int		kentry_count = 256;		/* to init kentry_data_size */
 
 void vm_map_init(void)
 {
-	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 0, 40*1024,
-					PAGE_SIZE, 0, "maps");
-	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
-					0, 1024*1024, PAGE_SIZE*5,
-					0, "non-kernel map entries");
-	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry), 0,
-					kentry_data_size, kentry_data_size,
-					ZONE_FIXED /* XXX */, "kernel map entries");
-
-	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
-					0, 16*1024, PAGE_SIZE, 0,
-					"map copies");
-
-	/*
-	 *	Cram the kentry zone with initial data.
-	 */
-	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
+	kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
+			NULL, NULL, NULL, 0);
+	kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
+			sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
+	kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
+			sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
+			NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
+			      | KMEM_CACHE_NORECLAIM);
+	kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
+			sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
 
	/*
	 *	Submap object is initialized by vm_object_init.
	 */
@@ -210,7 +203,7 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
 {
	register vm_map_t	result;
 
-	result = (vm_map_t) zalloc(vm_map_zone);
+	result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");
@@ -250,15 +243,15 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
 vm_map_entry_t _vm_map_entry_create(map_header)
	register struct vm_map_header *map_header;
 {
-	register zone_t	zone;
+	register kmem_cache_t cache;
	register vm_map_entry_t	entry;
 
	if (map_header->entries_pageable)
-	    zone = vm_map_entry_zone;
+	    cache = &vm_map_entry_cache;
	else
-	    zone = vm_map_kentry_zone;
+	    cache = &vm_map_kentry_cache;
 
-	entry = (vm_map_entry_t) zalloc(zone);
+	entry = (vm_map_entry_t) kmem_cache_alloc(cache);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");
@@ -280,14 +273,14 @@ void _vm_map_entry_dispose(map_header, entry)
	register struct vm_map_header *map_header;
	register vm_map_entry_t	entry;
 {
-	register zone_t		zone;
+	register kmem_cache_t cache;
 
	if (map_header->entries_pageable)
-	    zone = vm_map_entry_zone;
+	    cache = &vm_map_entry_cache;
	else
-	    zone = vm_map_kentry_zone;
+	    cache = &vm_map_kentry_cache;
 
-	zfree(zone, (vm_offset_t) entry);
+	kmem_cache_free(cache, (vm_offset_t) entry);
 }
 
 /*
@@ -368,7 +361,7 @@ void vm_map_deallocate(map)
 
	pmap_destroy(map->pmap);
 
-	zfree(vm_map_zone, (vm_offset_t) map);
+	kmem_cache_free(&vm_map_cache, (vm_offset_t) map);
 }
 
 /*
@@ -1907,7 +1900,7 @@ free_next_copy:
			register vm_map_copy_t	new_copy;
 
			new_copy = (vm_map_copy_t) copy->cpy_cont_args;
-			zfree(vm_map_copy_zone, (vm_offset_t) copy);
+			kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
			copy = new_copy;
			goto free_next_copy;
		}
@@ -1918,7 +1911,7 @@ free_next_copy:
		break;
	}
 
-	zfree(vm_map_copy_zone, (vm_offset_t) copy);
+	kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
 }
 
 /*
@@ -1952,7 +1945,7 @@ vm_map_copy_copy(copy)
	 *	from the old one into it.
	 */
 
-	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
	*new_copy = *copy;
 
	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
@@ -2160,7 +2153,7 @@ start_pass_1:
	/*
	 *	XXXO	If there are no permanent objects in the destination,
-	 *	XXXO	and the source and destination map entry zones match,
+	 *	XXXO	and the source and destination map entry caches match,
	 *	XXXO	and the destination map entry is not shared,
	 *	XXXO	then the map entries can be deleted and replaced
	 *	XXXO	with those from the copy.  The following code is the
@@ -2403,7 +2396,7 @@ start_pass_1:
		((where)->vme_next = vm_map_copy_first_entry(copy))	\
			->vme_prev = (where);				\
	(map)->hdr.nentries += (copy)->cpy_hdr.nentries;		\
-	zfree(vm_map_copy_zone, (vm_offset_t) copy);			\
+	kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);	\
 MACRO_END
 
 /*
@@ -2459,7 +2452,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
			      VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS)
			return(kr);
-		zfree(vm_map_copy_zone, (vm_offset_t) copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
		return(KERN_SUCCESS);
	}
@@ -2516,15 +2509,15 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
	 * Mismatches occur when dealing with the default
	 * pager.
	 */
-	zone_t		old_zone;
+	kmem_cache_t	old_cache;
	vm_map_entry_t	next, new;
 
	/*
-	 * Find the zone that the copies were allocated from
+	 * Find the cache that the copies were allocated from
	 */
-	old_zone = (copy->cpy_hdr.entries_pageable)
-			? vm_map_entry_zone
-			: vm_map_kentry_zone;
+	old_cache = (copy->cpy_hdr.entries_pageable)
+			? &vm_map_entry_cache
+			: &vm_map_kentry_cache;
	entry = vm_map_copy_first_entry(copy);
 
	/*
@@ -2547,7 +2540,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
				 vm_map_copy_last_entry(copy),
				 new);
		next = entry->vme_next;
-		zfree(old_zone, (vm_offset_t) entry);
+		kmem_cache_free(old_cache, (vm_offset_t) entry);
		entry = next;
	}
 }
@@ -3036,10 +3029,10 @@ error:
	 *	Consume on success logic.
	 */
	if (copy != orig_copy) {
-		zfree(vm_map_copy_zone, (vm_offset_t) copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
	}
	if (result == KERN_SUCCESS) {
-		zfree(vm_map_copy_zone, (vm_offset_t) orig_copy);
+		kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy);
	}
 
	return(result);
@@ -3116,7 +3109,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
	 *	remember the endpoints prior to rounding.
	 */
 
-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
	vm_map_copy_first_entry(copy) =
	 vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
	copy->type = VM_MAP_COPY_ENTRY_LIST;
@@ -3443,7 +3436,7 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
	 *	and null links.
	 */
 
-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
	vm_map_copy_first_entry(copy) =
	 vm_map_copy_last_entry(copy) = VM_MAP_ENTRY_NULL;
	copy->type = VM_MAP_COPY_OBJECT;
@@ -3598,7 +3591,7 @@ kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
	 *	be page-aligned.
	 */
 
-	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
	copy->type = VM_MAP_COPY_PAGE_LIST;
	copy->cpy_npages = 0;
	copy->offset = src_addr;
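vm_map_kentry_cache, initialized in the vm_map_init() hunk above, is the one conversion that cannot use default backing: kernel map entries must be allocatable without recursing into kernel map allocation, which is why the old code crammed its zone with statically stolen kentry_data and the new code instead passes a custom slab allocation hook (kentry_pagealloc) together with KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_NORECLAIM. The hook itself is not part of the hunks shown here; a hypothetical reconstruction of its shape, carving slabs out of the kentry_data region that vm_page_bootstrap() steals (see the vm_resident.c hunks below), might look like:

    /* Hypothetical sketch -- the real function is outside the hunks
     * shown.  It hands slabs to the kentry cache from statically stolen
     * memory, so no kernel map allocation is ever needed. */
    static vm_offset_t kentry_pagealloc(vm_size_t size)
    {
            vm_offset_t result;

            if (size > kentry_data_size)
                    panic("vm_map: kentry memory exhausted");

            result = kentry_data;
            kentry_data += size;
            kentry_data_size -= size;
            return result;
    }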
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 9057973..1c6e431 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -47,7 +47,7 @@
 #include <kern/lock.h>
 #include <kern/queue.h>
 #include <kern/xpr.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/memory_object.h>
 #include <vm/vm_fault.h>
 #include <vm/vm_map.h>
@@ -141,7 +141,7 @@ void vm_object_deactivate_pages(vm_object_t);
 *	ZZZ Continue this comment.
 */
 
-zone_t		vm_object_zone;		/* vm backing store zone */
+struct kmem_cache	vm_object_cache; /* vm backing store cache */
 
 /*
 *	All wired-down kernel memory belongs to a single virtual
@@ -211,7 +211,7 @@ vm_object_t _vm_object_allocate(
 {
	register vm_object_t object;
 
-	object = (vm_object_t) zalloc(vm_object_zone);
+	object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
 
	*object = *vm_object_template;
	queue_init(&object->memq);
@@ -244,10 +244,8 @@ vm_object_t vm_object_allocate(
 */
 void vm_object_bootstrap(void)
 {
-	vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object), 0,
-				round_page(512*1024),
-				round_page(12*1024),
-				0, "objects");
+	kmem_cache_init(&vm_object_cache, "vm_object",
+			sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
 
	queue_init(&vm_object_cached_list);
	simple_lock_init(&vm_object_cached_lock_data);
@@ -256,7 +254,7 @@ void vm_object_bootstrap(void)
	 *	Fill in a template object, for quick initialization
	 */
 
-	vm_object_template = (vm_object_t) zalloc(vm_object_zone);
+	vm_object_template = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
	memset(vm_object_template, 0, sizeof *vm_object_template);
 
	vm_object_template->ref_count = 1;
@@ -660,7 +658,7 @@ void vm_object_terminate(
	 *	Free the space for the object.
	 */
 
-	zfree(vm_object_zone, (vm_offset_t) object);
+	kmem_cache_free(&vm_object_cache, (vm_offset_t) object);
 }
 
 /*
@@ -2618,7 +2616,7 @@ void vm_object_collapse(
		vm_object_unlock(object);
		if (old_name_port != IP_NULL)
			ipc_port_dealloc_kernel(old_name_port);
-		zfree(vm_object_zone, (vm_offset_t) backing_object);
+		kmem_cache_free(&vm_object_cache, (vm_offset_t) backing_object);
		vm_object_lock(object);
 
		object_collapses++;
diff --git a/vm/vm_page.h b/vm/vm_page.h
index f13b0af..4536d1c 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -41,7 +41,6 @@
 #include <vm/vm_types.h>
 #include <kern/queue.h>
 #include <kern/lock.h>
-#include <kern/zalloc.h>
 
 #include <kern/macro_help.h>
 #include <kern/sched_prim.h>	/* definitions of wait/wakeup */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 7a755bf..77c1cfe 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -43,6 +43,7 @@
 #include <mach/vm_statistics.h>
 #include <kern/counters.h>
 #include <kern/debug.h>
+#include <kern/slab.h>
 #include <kern/task.h>
 #include <kern/thread.h>
 #include <vm/pmap.h>
@@ -544,8 +545,8 @@ void vm_pageout_scan()
	 * into an internal object and then immediately double-page it,
	 * sending it to the default pager.
	 *
-	 * consider_zone_gc should be last, because the other operations
-	 * might return memory to zones.  When we pause we use
+	 * slab_collect should be last, because the other operations
+	 * might return memory to caches.  When we pause we use
	 * vm_pageout_scan_continue as our continuation, so we will
	 * reenter vm_pageout_scan periodically and attempt to reclaim
	 * internal memory even if we never reach vm_page_free_target.
@@ -555,7 +556,7 @@ void vm_pageout_scan()
	net_kmsg_collect();
	consider_task_collect();
	consider_thread_collect();
-	consider_zone_gc();
+	slab_collect();
 
	for (burst_count = 0;;) {
		register vm_page_t m;
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 96354a4..5a63ad4 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -45,7 +45,7 @@
 #include <mach/vm_statistics.h>
 #include <machine/vm_param.h>
 #include <kern/xpr.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <vm/pmap.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
@@ -58,10 +58,6 @@
 #include <vm/vm_user.h>
 #endif
 
-/* in zalloc.c XXX */
-extern vm_offset_t zdata;
-extern vm_size_t zdata_size;
-
 /*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
@@ -126,7 +122,7 @@
 unsigned int	vm_page_free_count_minimum;	/* debugging */
 
 /*
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
-zone_t	vm_page_zone;
+struct kmem_cache	vm_page_cache;
 
 /*
 *	Fictitious pages don't have a physical address,
@@ -239,14 +235,12 @@ void vm_page_bootstrap(
	vm_page_free_wanted = 0;
 
	/*
-	 *	Steal memory for the zone system.
+	 *	Steal memory for the kernel map entries.
	 */
 
	kentry_data_size = kentry_count * sizeof(struct vm_map_entry);
	kentry_data = pmap_steal_memory(kentry_data_size);
 
-	zdata = pmap_steal_memory(zdata_size);
-
	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
@@ -430,10 +424,8 @@ void pmap_startup(
 */
 void		vm_page_module_init(void)
 {
-	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page), 0,
-			     VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
-			     PAGE_SIZE,
-			     0, "vm pages");
+	kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
+			NULL, NULL, NULL, 0);
 }
 
 /*
@@ -455,7 +447,7 @@ void vm_page_create(
	for (paddr = round_page(start);
	     paddr < trunc_page(end);
	     paddr += PAGE_SIZE) {
-		m = (vm_page_t) zalloc(vm_page_zone);
+		m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
		if (m == VM_PAGE_NULL)
			panic("vm_page_create");
@@ -810,7 +802,7 @@ void vm_page_more_fictitious(void)
	int i;
 
	for (i = 0; i < vm_page_fictitious_quantum; i++) {
-		m = (vm_page_t) zalloc(vm_page_zone);
+		m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
		if (m == VM_PAGE_NULL)
			panic("vm_page_more_fictitious");