| author | Richard Braun <rbraun@sceen.net> | 2011-12-17 15:24:05 +0000 |
|---|---|---|
| committer | Richard Braun <rbraun@sceen.net> | 2011-12-17 22:12:40 +0000 |
| commit | db2078e4f1802434f791f4f1c333725c42fe172b | |
| tree | 39961f1164f12e1496a84d2f40451dcb78609b7e /device | |
| parent | 7bc54a622e0c57a1085cd2990a1deedc8bd4743d | |
Adjust the kernel to use the slab allocator
* device/dev_lookup.c: Replace zalloc header, types and function calls
with their slab counterparts.
* device/dev_pager.c: Likewise.
* device/ds_routines.c: Likewise.
* device/io_req.h: Likewise.
* device/net_io.c: Likewise.
* i386/i386/fpu.c: Likewise.
* i386/i386/io_perm.c: Likewise.
* i386/i386/machine_task.c: Likewise.
* i386/i386/pcb.c: Likewise.
* i386/i386/task.h: Likewise.
* i386/intel/pmap.c: Likewise.
* i386/intel/pmap.h: Remove #include <kern/zalloc.h>.
* include/mach_debug/mach_debug.defs (host_zone_info): Replace
routine declaration with skip directive.
(host_slab_info): New routine declaration.
* include/mach_debug/mach_debug_types.defs (zone_name_t)
(zone_name_array_t, zone_info_t, zone_info_array_t): Remove types.
(cache_info_t, cache_info_array_t): New types.
* include/mach_debug/mach_debug_types.h: Replace #include
<mach_debug/zone_info.h> with <mach_debug/slab_info.h>.
* ipc/ipc_entry.c: Replace zalloc header, types and function calls with
their slab counterparts.
* ipc/ipc_entry.h: Likewise.
* ipc/ipc_init.c: Likewise.
* ipc/ipc_marequest.c: Likewise.
* ipc/ipc_object.c: Likewise.
* ipc/ipc_object.h: Likewise.
* ipc/ipc_space.c: Likewise.
* ipc/ipc_space.h: Likewise.
* ipc/ipc_table.c (kalloc_map): Remove extern declaration.
* kern/act.c: Replace zalloc header, types and function calls with their
slab counterparts.
* kern/kalloc.h: Add #include <vm/vm_types.h>.
(MINSIZE): Remove definition.
(kalloc_map): Add extern declaration.
(kget): Remove prototype.
* kern/mach_clock.c: Adjust comment.
* kern/processor.c: Replace zalloc header, types and function calls with
their slab counterparts.
* kern/startup.c: Remove #include <kern/zalloc.h>.
* kern/task.c: Replace zalloc header, types and function calls with
their slab counterparts.
* kern/thread.c: Likewise.
* vm/memory_object_proxy.c: Likewise.
* vm/vm_external.c: Likewise.
* vm/vm_fault.c: Likewise.
* vm/vm_init.c: Likewise.
* vm/vm_map.c: Likewise.
* vm/vm_object.c: Likewise.
* vm/vm_page.h: Remove #include <kern/zalloc.h>.
* vm/vm_pageout.c: Replace zalloc header, types and function calls with
their slab counterparts.
* vm/vm_resident.c: Likewise.
(zdata, zdata_size): Remove declarations.
(vm_page_bootstrap): Don't steal memory for the zone system.
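The change is the same mechanical substitution in every file it touches. The snippet below condenses the device/dev_lookup.c hunks from the diff that follows into a before/after sketch (trimmed, not meant to compile on its own): a dynamically created zone becomes a statically allocated cache, zinit() becomes kmem_cache_init(), and zalloc()/zfree() become kmem_cache_alloc()/kmem_cache_free().

```c
/* Before: zone allocator */
#include <kern/zalloc.h>

zone_t dev_hdr_zone;                    /* handle returned by zinit() */

dev_hdr_zone = zinit(sizeof(struct mach_device), 0,
                     sizeof(struct mach_device) * NDEVICES,
                     PAGE_SIZE, FALSE, "open device entry");
new_device = (mach_device_t) zalloc(dev_hdr_zone);
/* ... */
zfree(dev_hdr_zone, (vm_offset_t) new_device);

/* After: slab allocator */
#include <kern/slab.h>

struct kmem_cache dev_hdr_cache;        /* the cache object itself, statically allocated */

kmem_cache_init(&dev_hdr_cache, "mach_device",
                sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
new_device = (mach_device_t) kmem_cache_alloc(&dev_hdr_cache);
/* ... */
kmem_cache_free(&dev_hdr_cache, (vm_offset_t) new_device);
```

Note that the zinit() calls carried explicit sizing hints (a maximum zone size and an allocation granularity) that have no counterpart in the new calls; only the object size and alignment are passed to the cache interface.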
Diffstat (limited to 'device')
-rw-r--r-- | device/dev_lookup.c | 17 |
-rw-r--r-- | device/dev_pager.c | 33 |
-rw-r--r-- | device/ds_routines.c | 38 |
-rw-r--r-- | device/io_req.h | 5 |
-rw-r--r-- | device/net_io.c | 32 |
5 files changed, 51 insertions, 74 deletions
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index 2391e8d..98a2d02 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -32,7 +32,7 @@
 #include <mach/vm_param.h>
 
 #include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 
 #include <device/device_types.h>
 #include <device/dev_hdr.h>
@@ -62,7 +62,7 @@ queue_head_t dev_number_hash_table[NDEVHASH];
 decl_simple_lock_data(, dev_number_lock)
 
-zone_t            dev_hdr_zone;
+struct kmem_cache dev_hdr_cache;
 
 /*
  * Enter device in the number lookup table.
@@ -151,7 +151,7 @@ device_lookup(name)
             simple_unlock(&dev_number_lock);
 
-            new_device = (mach_device_t) zalloc(dev_hdr_zone);
+            new_device = (mach_device_t) kmem_cache_alloc(&dev_hdr_cache);
             simple_lock_init(&new_device->ref_lock);
             new_device->ref_count = 1;
             simple_lock_init(&new_device->lock);
@@ -187,7 +187,7 @@ device_lookup(name)
             simple_unlock(&dev_number_lock);
 
             if (new_device != MACH_DEVICE_NULL)
-                zfree(dev_hdr_zone, (vm_offset_t)new_device);
+                kmem_cache_free(&dev_hdr_cache, (vm_offset_t)new_device);
         }
 
         return (device);
@@ -233,7 +233,7 @@ mach_device_deallocate(device)
         simple_unlock(&device->ref_lock);
         simple_unlock(&dev_number_lock);
 
-        zfree(dev_hdr_zone, (vm_offset_t)device);
+        kmem_cache_free(&dev_hdr_cache, (vm_offset_t)device);
 }
 
 /*
@@ -376,9 +376,6 @@ dev_lookup_init()
         simple_lock_init(&dev_port_lock);
 
-        dev_hdr_zone = zinit(sizeof(struct mach_device), 0,
-                             sizeof(struct mach_device) * NDEVICES,
-                             PAGE_SIZE,
-                             FALSE,
-                             "open device entry");
+        kmem_cache_init(&dev_hdr_cache, "mach_device",
+                        sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
 }
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 447781e..dc5ba73 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -44,7 +44,7 @@
 #include <kern/debug.h>
 #include <kern/printf.h>
 #include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/kalloc.h>
 
 #include <vm/vm_page.h>
@@ -126,7 +126,7 @@ typedef struct dev_pager *dev_pager_t;
 #define DEV_PAGER_NULL ((dev_pager_t)0)
 
-zone_t            dev_pager_zone;
+struct kmem_cache dev_pager_cache;
 
 void dev_pager_reference(register dev_pager_t ds)
 {
@@ -144,7 +144,7 @@ void dev_pager_deallocate(register dev_pager_t ds)
         }
 
         simple_unlock(&ds->lock);
-        zfree(dev_pager_zone, (vm_offset_t)ds);
+        kmem_cache_free(&dev_pager_cache, (vm_offset_t)ds);
 }
 
 /*
@@ -161,7 +161,7 @@ struct dev_pager_entry {
 typedef struct dev_pager_entry *dev_pager_entry_t;
 
 queue_head_t      dev_pager_hashtable[DEV_PAGER_HASH_COUNT];
-zone_t            dev_pager_hash_zone;
+struct kmem_cache dev_pager_hash_cache;
 decl_simple_lock_data(, dev_pager_hash_lock)
 
@@ -174,13 +174,8 @@ void dev_pager_hash_init(void)
         register vm_size_t size;
 
         size = sizeof(struct dev_pager_entry);
-        dev_pager_hash_zone = zinit(
-                                size,
-                                0,
-                                size * 1000,
-                                PAGE_SIZE,
-                                FALSE,
-                                "dev_pager port hash");
+        kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0,
+                        NULL, NULL, NULL, 0);
         for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
             queue_init(&dev_pager_hashtable[i]);
         simple_lock_init(&dev_pager_hash_lock);
@@ -192,7 +187,7 @@ void dev_pager_hash_insert(
 {
         register dev_pager_entry_t new_entry;
 
-        new_entry = (dev_pager_entry_t) zalloc(dev_pager_hash_zone);
+        new_entry = (dev_pager_entry_t) kmem_cache_alloc(&dev_pager_hash_cache);
         new_entry->name = name_port;
         new_entry->pager_rec = rec;
 
@@ -220,7 +215,7 @@ void dev_pager_hash_delete(ipc_port_t name_port)
         }
         simple_unlock(&dev_pager_hash_lock);
         if (entry)
-            zfree(dev_pager_hash_zone, (vm_offset_t)entry);
+            kmem_cache_free(&dev_pager_hash_cache, (vm_offset_t)entry);
 }
 
 dev_pager_t dev_pager_hash_lookup(ipc_port_t name_port)
@@ -273,7 +268,7 @@ kern_return_t device_pager_setup(
             return (D_SUCCESS);
         }
 
-        d = (dev_pager_t) zalloc(dev_pager_zone);
+        d = (dev_pager_t) kmem_cache_alloc(&dev_pager_cache);
         if (d == DEV_PAGER_NULL)
             return (KERN_RESOURCE_SHORTAGE);
 
@@ -726,15 +721,11 @@ void device_pager_init(void)
         register vm_size_t size;
 
         /*
-         * Initialize zone of paging structures.
+         * Initialize cache of paging structures.
          */
         size = sizeof(struct dev_pager);
-        dev_pager_zone = zinit(size,
-                               0,
-                               (vm_size_t) size * 1000,
-                               PAGE_SIZE,
-                               FALSE,
-                               "device pager structures");
+        kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
+                        NULL, NULL, NULL, 0);
 
         /*
          * Initialize the name port hashing stuff.
diff --git a/device/ds_routines.c b/device/ds_routines.c
index d4a08fb..f0f8c59 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -73,7 +73,7 @@
 #include <kern/debug.h>
 #include <kern/printf.h>
 #include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
 #include <kern/thread.h>
 #include <kern/task.h>
 #include <kern/sched_prim.h>
@@ -855,7 +855,7 @@ device_write_get(ior, wait)
          */
         if (ior->io_op & IO_INBAND) {
             assert(ior->io_count <= sizeof (io_buf_ptr_inband_t));
-            new_addr = zalloc(io_inband_zone);
+            new_addr = kmem_cache_alloc(&io_inband_cache);
             memcpy((void*)new_addr, ior->io_data, ior->io_count);
             ior->io_data = (io_buf_ptr_t)new_addr;
             ior->io_alloc_size = sizeof (io_buf_ptr_inband_t);
@@ -935,7 +935,7 @@ device_write_dealloc(ior)
          * Inband case.
          */
         if (ior->io_op & IO_INBAND) {
-            zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+            kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
             return (TRUE);
         }
@@ -1245,7 +1245,7 @@ kern_return_t device_read_alloc(ior, size)
         return (KERN_SUCCESS);
 
     if (ior->io_op & IO_INBAND) {
-        ior->io_data = (io_buf_ptr_t) zalloc(io_inband_zone);
+        ior->io_data = (io_buf_ptr_t) kmem_cache_alloc(&io_inband_cache);
         ior->io_alloc_size = sizeof(io_buf_ptr_inband_t);
     } else {
         size = round_page(size);
@@ -1338,7 +1338,7 @@ boolean_t ds_read_done(ior)
         if (ior->io_count != 0) {
             if (ior->io_op & IO_INBAND) {
                 if (ior->io_alloc_size > 0)
-                    zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+                    kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
             } else {
                 register vm_offset_t end_alloc;
@@ -1575,11 +1575,8 @@ void mach_device_init()
          */
         device_io_map->wait_for_space = TRUE;
 
-        io_inband_zone = zinit(sizeof(io_buf_ptr_inband_t), 0,
-                               1000 * sizeof(io_buf_ptr_inband_t),
-                               10 * sizeof(io_buf_ptr_inband_t),
-                               FALSE,
-                               "io inband read buffers");
+        kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
+                        sizeof(io_buf_ptr_inband_t), 0, NULL, NULL, NULL, 0);
 
         mach_device_trap_init();
 }
@@ -1615,7 +1612,7 @@ void iowait(ior)
  */
 #define IOTRAP_REQSIZE 2048
 
-zone_t io_trap_zone;
+struct kmem_cache io_trap_cache;
 
 /*
  * Initialization.  Called from mach_device_init().
@@ -1623,24 +1620,21 @@ zone_t io_trap_zone;
 static void
 mach_device_trap_init(void)
 {
-        io_trap_zone = zinit(IOTRAP_REQSIZE, 0,
-                             256 * IOTRAP_REQSIZE,
-                             16 * IOTRAP_REQSIZE,
-                             FALSE,
-                             "wired device trap buffers");
+        kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
+                        NULL, NULL, NULL, 0);
 }
 
 /*
  * Allocate an io_req_t.
- * Currently zalloc's from io_trap_zone.
+ * Currently allocates from io_trap_cache.
  *
- * Could have lists of different size zones.
+ * Could have lists of different size caches.
  * Could call a device-specific routine.
  */
 io_req_t
 ds_trap_req_alloc(mach_device_t device, vm_size_t data_size)
 {
-        return (io_req_t) zalloc(io_trap_zone);
+        return (io_req_t) kmem_cache_alloc(&io_trap_cache);
 }
 
 /*
@@ -1656,7 +1650,7 @@ ds_trap_write_done(io_req_t ior)
         /*
          * Should look at reply port and maybe send a message.
          */
-        zfree(io_trap_zone, (vm_offset_t) ior);
+        kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
 
         /*
          * Give up device reference from ds_write_trap.
@@ -1732,7 +1726,7 @@ device_write_trap (mach_device_t device, dev_mode_t mode,
          */
         mach_device_deallocate(device);
 
-        zfree(io_trap_zone, (vm_offset_t) ior);
+        kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
 
         return (result);
 }
@@ -1823,7 +1817,7 @@ device_writev_trap (mach_device_t device, dev_mode_t mode,
          */
         mach_device_deallocate(device);
 
-        zfree(io_trap_zone, (vm_offset_t) ior);
+        kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
 
         return (result);
 }
diff --git a/device/io_req.h b/device/io_req.h
index 162524d..65e23e6 100644
--- a/device/io_req.h
+++ b/device/io_req.h
@@ -35,6 +35,7 @@
 #include <mach/port.h>
 #include <mach/message.h>
 #include <mach/vm_param.h>
+#include <kern/slab.h>
 #include <kern/kalloc.h>
 #include <kern/lock.h>
 #include <vm/vm_page.h>
@@ -124,7 +125,7 @@ struct io_req {
 void iodone(io_req_t);
 
 /*
- * Macros to allocate and free IORs - will convert to zones later.
+ * Macros to allocate and free IORs - will convert to caches later.
  */
 #define io_req_alloc(ior,size)                                  \
         MACRO_BEGIN                                             \
@@ -136,6 +137,6 @@ void iodone(io_req_t);
         (kfree((vm_offset_t)(ior), sizeof(struct io_req)))
 
-zone_t io_inband_zone;                  /* for inband reads */
+struct kmem_cache io_inband_cache;      /* for inband reads */
 
 #endif  /* _IO_REQ_ */
diff --git a/device/net_io.c b/device/net_io.c
index 8446395..52a0716 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -61,6 +61,7 @@
 #include <kern/printf.h>
 #include <kern/queue.h>
 #include <kern/sched_prim.h>
+#include <kern/slab.h>
 #include <kern/thread.h>
 
 #include <machine/machspl.h>
@@ -302,7 +303,7 @@ struct net_rcv_port {
 };
 typedef struct net_rcv_port *net_rcv_port_t;
 
-zone_t            net_rcv_zone;         /* zone of net_rcv_port structs */
+struct kmem_cache net_rcv_cache;        /* cache of net_rcv_port structs */
 
 #define NET_HASH_SIZE   256
@@ -324,7 +325,7 @@ struct net_hash_entry {
 };
 typedef struct net_hash_entry *net_hash_entry_t;
 
-zone_t net_hash_entry_zone;
+struct kmem_cache net_hash_entry_cache;
 
 /*
  * This structure represents a packet filter with multiple sessions.
@@ -1195,7 +1196,7 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
              * If there is no match instruction, we allocate
              * a normal packet filter structure.
              */
-            my_infp = (net_rcv_port_t) zalloc(net_rcv_zone);
+            my_infp = (net_rcv_port_t) kmem_cache_alloc(&net_rcv_cache);
             my_infp->rcv_port = rcv_port;
             is_new_infp = TRUE;
         } else {
@@ -1205,7 +1206,7 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
              * a hash table to deal with them.
             */
            my_infp = 0;
-           hash_entp = (net_hash_entry_t) zalloc(net_hash_entry_zone);
+           hash_entp = (net_hash_entry_t) kmem_cache_alloc(&net_hash_entry_cache);
            is_new_infp = FALSE;
        }
@@ -1310,7 +1311,8 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
                 ipc_port_release_send(rcv_port);
 
                 if (match != 0)
-                        zfree (net_hash_entry_zone, (vm_offset_t)hash_entp);
+                        kmem_cache_free(&net_hash_entry_cache,
+                                        (vm_offset_t)hash_entp);
 
                 rval = D_NO_MEMORY;
                 goto clean_and_return;
@@ -1526,20 +1528,12 @@ net_io_init()
         register vm_size_t size;
 
         size = sizeof(struct net_rcv_port);
-        net_rcv_zone = zinit(size,
-                             0,
-                             size * 1000,
-                             PAGE_SIZE,
-                             FALSE,
-                             "net_rcv_port");
+        kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
+                        NULL, NULL, NULL, 0);
 
         size = sizeof(struct net_hash_entry);
-        net_hash_entry_zone = zinit(size,
-                                    0,
-                                    size * 100,
-                                    PAGE_SIZE,
-                                    FALSE,
-                                    "net_hash_entry");
+        kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
+                        NULL, NULL, NULL, 0);
 
         size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
         net_kmsg_size = round_page(size);
@@ -2167,7 +2161,7 @@ net_free_dead_infp (dead_infp)
             nextfp = (net_rcv_port_t) queue_next(&infp->input);
             ipc_port_release_send(infp->rcv_port);
             net_del_q_info(infp->rcv_qlimit);
-            zfree(net_rcv_zone, (vm_offset_t) infp);
+            kmem_cache_free(&net_rcv_cache, (vm_offset_t) infp);
         }
 }
@@ -2190,7 +2184,7 @@ net_free_dead_entp (dead_entp)
             ipc_port_release_send(entp->rcv_port);
             net_del_q_info(entp->rcv_qlimit);
-            zfree(net_hash_entry_zone, (vm_offset_t) entp);
+            kmem_cache_free(&net_hash_entry_cache, (vm_offset_t) entp);
         }
 }
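For reference, every kmem_cache_init() call introduced in this diff has the same shape and relies on the allocator's defaults. The annotated copy below is an editor's reading of the argument positions as they are used here; the labels are descriptive guesses, not the parameter names from kern/slab.h.

```c
/* Annotated form of the initialization calls used throughout this patch.
 * The comments label the argument positions as used here; they are not
 * taken from the kern/slab.h prototype. */
kmem_cache_init(&dev_hdr_cache,              /* cache to set up (statically allocated) */
                "mach_device",               /* human-readable cache name, used for reporting */
                sizeof(struct mach_device),  /* object size */
                0,                           /* alignment, 0 for the default */
                NULL, NULL, NULL,            /* optional constructor and slab hooks, unused here */
                0);                          /* flags */
```

Every call in the device code passes 0 for alignment and flags and NULL for the three hooks, which is what keeps the conversion purely mechanical.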