Diffstat (limited to 'device')
-rw-r--r--   device/dev_lookup.c    17
-rw-r--r--   device/dev_pager.c     34
-rw-r--r--   device/ds_routines.c   49
-rw-r--r--   device/io_req.h         5
-rw-r--r--   device/net_io.c        32
5 files changed, 56 insertions(+), 81 deletions(-)
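The hunks below apply one mechanical substitution throughout device/: the old zone allocator (zinit/zalloc/zfree from <kern/zalloc.h>) is replaced by slab object caches (kmem_cache_init/kmem_cache_alloc/kmem_cache_free from <kern/slab.h>), and each cache is now a statically embedded struct kmem_cache instead of a zone_t created at run time. A minimal sketch of the pattern, using a hypothetical struct foo; the meaning of the trailing NULL, NULL, NULL, 0 arguments (constructor, slab alloc/free hooks, flags) is an assumption, not something this diff spells out:

#include <kern/slab.h>                  /* struct kmem_cache, kmem_cache_*() */
#include <mach/vm_param.h>

struct foo {                            /* hypothetical object type */
	int	refcount;
};

struct kmem_cache foo_cache;            /* was: zone_t foo_zone; */

void
foo_init(void)
{
	/*
	 * was: foo_zone = zinit(sizeof(struct foo), 0,
	 *                       sizeof(struct foo) * NFOO, PAGE_SIZE,
	 *                       FALSE, "foo entries");
	 *
	 * The cache needs no up-front maximum size; the trailing
	 * NULL, NULL, NULL, 0 arguments are left at their defaults,
	 * exactly as every call site in this diff does.
	 */
	kmem_cache_init(&foo_cache, "foo", sizeof(struct foo), 0,
			NULL, NULL, NULL, 0);
}

struct foo *
foo_alloc(void)
{
	/* was: (struct foo *) zalloc(foo_zone); */
	return (struct foo *) kmem_cache_alloc(&foo_cache);
}

void
foo_free(struct foo *fp)
{
	/* was: zfree(foo_zone, (vm_offset_t) fp); */
	kmem_cache_free(&foo_cache, (vm_offset_t) fp);
}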
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index 2391e8d..98a2d02 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -32,7 +32,7 @@
#include <mach/vm_param.h>
#include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <device/device_types.h>
#include <device/dev_hdr.h>
@@ -62,7 +62,7 @@ queue_head_t dev_number_hash_table[NDEVHASH];
decl_simple_lock_data(,
dev_number_lock)
-zone_t dev_hdr_zone;
+struct kmem_cache dev_hdr_cache;
/*
* Enter device in the number lookup table.
@@ -151,7 +151,7 @@ device_lookup(name)
simple_unlock(&dev_number_lock);
- new_device = (mach_device_t) zalloc(dev_hdr_zone);
+ new_device = (mach_device_t) kmem_cache_alloc(&dev_hdr_cache);
simple_lock_init(&new_device->ref_lock);
new_device->ref_count = 1;
simple_lock_init(&new_device->lock);
@@ -187,7 +187,7 @@ device_lookup(name)
simple_unlock(&dev_number_lock);
if (new_device != MACH_DEVICE_NULL)
- zfree(dev_hdr_zone, (vm_offset_t)new_device);
+ kmem_cache_free(&dev_hdr_cache, (vm_offset_t)new_device);
}
return (device);
@@ -233,7 +233,7 @@ mach_device_deallocate(device)
simple_unlock(&device->ref_lock);
simple_unlock(&dev_number_lock);
- zfree(dev_hdr_zone, (vm_offset_t)device);
+ kmem_cache_free(&dev_hdr_cache, (vm_offset_t)device);
}
/*
@@ -376,9 +376,6 @@ dev_lookup_init()
simple_lock_init(&dev_port_lock);
- dev_hdr_zone = zinit(sizeof(struct mach_device), 0,
- sizeof(struct mach_device) * NDEVICES,
- PAGE_SIZE,
- FALSE,
- "open device entry");
+ kmem_cache_init(&dev_hdr_cache, "mach_device",
+ sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
}
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 447781e..bc58a15 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -44,8 +44,7 @@
#include <kern/debug.h>
#include <kern/printf.h>
#include <kern/queue.h>
-#include <kern/zalloc.h>
-#include <kern/kalloc.h>
+#include <kern/slab.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
@@ -126,7 +125,7 @@ typedef struct dev_pager *dev_pager_t;
#define DEV_PAGER_NULL ((dev_pager_t)0)
-zone_t dev_pager_zone;
+struct kmem_cache dev_pager_cache;
void dev_pager_reference(register dev_pager_t ds)
{
@@ -144,7 +143,7 @@ void dev_pager_deallocate(register dev_pager_t ds)
}
simple_unlock(&ds->lock);
- zfree(dev_pager_zone, (vm_offset_t)ds);
+ kmem_cache_free(&dev_pager_cache, (vm_offset_t)ds);
}
/*
@@ -161,7 +160,7 @@ struct dev_pager_entry {
typedef struct dev_pager_entry *dev_pager_entry_t;
queue_head_t dev_pager_hashtable[DEV_PAGER_HASH_COUNT];
-zone_t dev_pager_hash_zone;
+struct kmem_cache dev_pager_hash_cache;
decl_simple_lock_data(,
dev_pager_hash_lock)
@@ -174,13 +173,8 @@ void dev_pager_hash_init(void)
register vm_size_t size;
size = sizeof(struct dev_pager_entry);
- dev_pager_hash_zone = zinit(
- size,
- 0,
- size * 1000,
- PAGE_SIZE,
- FALSE,
- "dev_pager port hash");
+ kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0,
+ NULL, NULL, NULL, 0);
for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
queue_init(&dev_pager_hashtable[i]);
simple_lock_init(&dev_pager_hash_lock);
@@ -192,7 +186,7 @@ void dev_pager_hash_insert(
{
register dev_pager_entry_t new_entry;
- new_entry = (dev_pager_entry_t) zalloc(dev_pager_hash_zone);
+ new_entry = (dev_pager_entry_t) kmem_cache_alloc(&dev_pager_hash_cache);
new_entry->name = name_port;
new_entry->pager_rec = rec;
@@ -220,7 +214,7 @@ void dev_pager_hash_delete(ipc_port_t name_port)
}
simple_unlock(&dev_pager_hash_lock);
if (entry)
- zfree(dev_pager_hash_zone, (vm_offset_t)entry);
+ kmem_cache_free(&dev_pager_hash_cache, (vm_offset_t)entry);
}
dev_pager_t dev_pager_hash_lookup(ipc_port_t name_port)
@@ -273,7 +267,7 @@ kern_return_t device_pager_setup(
return (D_SUCCESS);
}
- d = (dev_pager_t) zalloc(dev_pager_zone);
+ d = (dev_pager_t) kmem_cache_alloc(&dev_pager_cache);
if (d == DEV_PAGER_NULL)
return (KERN_RESOURCE_SHORTAGE);
@@ -726,15 +720,11 @@ void device_pager_init(void)
register vm_size_t size;
/*
- * Initialize zone of paging structures.
+ * Initialize cache of paging structures.
*/
size = sizeof(struct dev_pager);
- dev_pager_zone = zinit(size,
- 0,
- (vm_size_t) size * 1000,
- PAGE_SIZE,
- FALSE,
- "device pager structures");
+ kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
+ NULL, NULL, NULL, 0);
/*
* Initialize the name port hashing stuff.
diff --git a/device/ds_routines.c b/device/ds_routines.c
index d4a08fb..5a6fdd2 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -73,7 +73,7 @@
#include <kern/debug.h>
#include <kern/printf.h>
#include <kern/queue.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
@@ -130,7 +130,8 @@ static struct device_emulation_ops *emulation_list[] =
&mach_device_emulation_ops,
};
-vm_map_t device_io_map;
+static struct vm_map device_io_map_store;
+vm_map_t device_io_map = &device_io_map_store;
#define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0]))
@@ -855,7 +856,7 @@ device_write_get(ior, wait)
*/
if (ior->io_op & IO_INBAND) {
assert(ior->io_count <= sizeof (io_buf_ptr_inband_t));
- new_addr = zalloc(io_inband_zone);
+ new_addr = kmem_cache_alloc(&io_inband_cache);
memcpy((void*)new_addr, ior->io_data, ior->io_count);
ior->io_data = (io_buf_ptr_t)new_addr;
ior->io_alloc_size = sizeof (io_buf_ptr_inband_t);
@@ -935,7 +936,7 @@ device_write_dealloc(ior)
* Inband case.
*/
if (ior->io_op & IO_INBAND) {
- zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+ kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
return (TRUE);
}
@@ -1245,7 +1246,7 @@ kern_return_t device_read_alloc(ior, size)
return (KERN_SUCCESS);
if (ior->io_op & IO_INBAND) {
- ior->io_data = (io_buf_ptr_t) zalloc(io_inband_zone);
+ ior->io_data = (io_buf_ptr_t) kmem_cache_alloc(&io_inband_cache);
ior->io_alloc_size = sizeof(io_buf_ptr_inband_t);
} else {
size = round_page(size);
@@ -1338,7 +1339,7 @@ boolean_t ds_read_done(ior)
if (ior->io_count != 0) {
if (ior->io_op & IO_INBAND) {
if (ior->io_alloc_size > 0)
- zfree(io_inband_zone, (vm_offset_t)ior->io_data);
+ kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
} else {
register vm_offset_t end_alloc;
@@ -1551,11 +1552,9 @@ void mach_device_init()
queue_init(&io_done_list);
simple_lock_init(&io_done_list_lock);
- device_io_map = kmem_suballoc(kernel_map,
- &device_io_min,
- &device_io_max,
- DEVICE_IO_MAP_SIZE,
- FALSE);
+ kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max,
+ DEVICE_IO_MAP_SIZE, FALSE);
+
/*
* If the kernel receives many device_write requests, the
* device_io_map might run out of space. To prevent
@@ -1575,11 +1574,8 @@ void mach_device_init()
*/
device_io_map->wait_for_space = TRUE;
- io_inband_zone = zinit(sizeof(io_buf_ptr_inband_t), 0,
- 1000 * sizeof(io_buf_ptr_inband_t),
- 10 * sizeof(io_buf_ptr_inband_t),
- FALSE,
- "io inband read buffers");
+ kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
+ sizeof(io_buf_ptr_inband_t), 0, NULL, NULL, NULL, 0);
mach_device_trap_init();
}
@@ -1615,7 +1611,7 @@ void iowait(ior)
*/
#define IOTRAP_REQSIZE 2048
-zone_t io_trap_zone;
+struct kmem_cache io_trap_cache;
/*
* Initialization. Called from mach_device_init().
@@ -1623,24 +1619,21 @@ zone_t io_trap_zone;
static void
mach_device_trap_init(void)
{
- io_trap_zone = zinit(IOTRAP_REQSIZE, 0,
- 256 * IOTRAP_REQSIZE,
- 16 * IOTRAP_REQSIZE,
- FALSE,
- "wired device trap buffers");
+ kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
+ NULL, NULL, NULL, 0);
}
/*
* Allocate an io_req_t.
- * Currently zalloc's from io_trap_zone.
+ * Currently allocates from io_trap_cache.
*
- * Could have lists of different size zones.
+ * Could have lists of different size caches.
* Could call a device-specific routine.
*/
io_req_t
ds_trap_req_alloc(mach_device_t device, vm_size_t data_size)
{
- return (io_req_t) zalloc(io_trap_zone);
+ return (io_req_t) kmem_cache_alloc(&io_trap_cache);
}
/*
@@ -1656,7 +1649,7 @@ ds_trap_write_done(io_req_t ior)
/*
* Should look at reply port and maybe send a message.
*/
- zfree(io_trap_zone, (vm_offset_t) ior);
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
/*
* Give up device reference from ds_write_trap.
@@ -1732,7 +1725,7 @@ device_write_trap (mach_device_t device, dev_mode_t mode,
*/
mach_device_deallocate(device);
- zfree(io_trap_zone, (vm_offset_t) ior);
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
return (result);
}
@@ -1823,7 +1816,7 @@ device_writev_trap (mach_device_t device, dev_mode_t mode,
*/
mach_device_deallocate(device);
- zfree(io_trap_zone, (vm_offset_t) ior);
+ kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);
return (result);
}
diff --git a/device/io_req.h b/device/io_req.h
index 162524d..65e23e6 100644
--- a/device/io_req.h
+++ b/device/io_req.h
@@ -35,6 +35,7 @@
#include <mach/port.h>
#include <mach/message.h>
#include <mach/vm_param.h>
+#include <kern/slab.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_page.h>
@@ -124,7 +125,7 @@ struct io_req {
void iodone(io_req_t);
/*
- * Macros to allocate and free IORs - will convert to zones later.
+ * Macros to allocate and free IORs - will convert to caches later.
*/
#define io_req_alloc(ior,size) \
MACRO_BEGIN \
@@ -136,6 +137,6 @@ void iodone(io_req_t);
(kfree((vm_offset_t)(ior), sizeof(struct io_req)))
-zone_t io_inband_zone; /* for inband reads */
+struct kmem_cache io_inband_cache; /* for inband reads */
#endif /* _IO_REQ_ */
diff --git a/device/net_io.c b/device/net_io.c
index 8446395..52a0716 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -61,6 +61,7 @@
#include <kern/printf.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <machine/machspl.h>
@@ -302,7 +303,7 @@ struct net_rcv_port {
};
typedef struct net_rcv_port *net_rcv_port_t;
-zone_t net_rcv_zone; /* zone of net_rcv_port structs */
+struct kmem_cache net_rcv_cache; /* cache of net_rcv_port structs */
#define NET_HASH_SIZE 256
@@ -324,7 +325,7 @@ struct net_hash_entry {
};
typedef struct net_hash_entry *net_hash_entry_t;
-zone_t net_hash_entry_zone;
+struct kmem_cache net_hash_entry_cache;
/*
* This structure represents a packet filter with multiple sessions.
@@ -1195,7 +1196,7 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
* If there is no match instruction, we allocate
* a normal packet filter structure.
*/
- my_infp = (net_rcv_port_t) zalloc(net_rcv_zone);
+ my_infp = (net_rcv_port_t) kmem_cache_alloc(&net_rcv_cache);
my_infp->rcv_port = rcv_port;
is_new_infp = TRUE;
} else {
@@ -1205,7 +1206,7 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
* a hash table to deal with them.
*/
my_infp = 0;
- hash_entp = (net_hash_entry_t) zalloc(net_hash_entry_zone);
+ hash_entp = (net_hash_entry_t) kmem_cache_alloc(&net_hash_entry_cache);
is_new_infp = FALSE;
}
@@ -1310,7 +1311,8 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
ipc_port_release_send(rcv_port);
if (match != 0)
- zfree (net_hash_entry_zone, (vm_offset_t)hash_entp);
+ kmem_cache_free(&net_hash_entry_cache,
+ (vm_offset_t)hash_entp);
rval = D_NO_MEMORY;
goto clean_and_return;
@@ -1526,20 +1528,12 @@ net_io_init()
register vm_size_t size;
size = sizeof(struct net_rcv_port);
- net_rcv_zone = zinit(size,
- 0,
- size * 1000,
- PAGE_SIZE,
- FALSE,
- "net_rcv_port");
+ kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
+ NULL, NULL, NULL, 0);
size = sizeof(struct net_hash_entry);
- net_hash_entry_zone = zinit(size,
- 0,
- size * 100,
- PAGE_SIZE,
- FALSE,
- "net_hash_entry");
+ kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
+ NULL, NULL, NULL, 0);
size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
net_kmsg_size = round_page(size);
@@ -2167,7 +2161,7 @@ net_free_dead_infp (dead_infp)
nextfp = (net_rcv_port_t) queue_next(&infp->input);
ipc_port_release_send(infp->rcv_port);
net_del_q_info(infp->rcv_qlimit);
- zfree(net_rcv_zone, (vm_offset_t) infp);
+ kmem_cache_free(&net_rcv_cache, (vm_offset_t) infp);
}
}
@@ -2190,7 +2184,7 @@ net_free_dead_entp (dead_entp)
ipc_port_release_send(entp->rcv_port);
net_del_q_info(entp->rcv_qlimit);
- zfree(net_hash_entry_zone, (vm_offset_t) entp);
+ kmem_cache_free(&net_hash_entry_cache, (vm_offset_t) entp);
}
}
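
Apart from the allocator switch, the ds_routines.c hunks also stop creating the device I/O submap dynamically: device_io_map now points at a statically allocated struct vm_map, and kmem_submap() initializes it in place where kmem_suballoc() used to allocate and return a new map. A minimal sketch of that shape, assuming kmem_submap() takes the caller-provided map as its first argument as the hunk shows; the device_io_map_init wrapper name and the DEVICE_IO_MAP_SIZE value are placeholders (in the diff this happens inside mach_device_init):

#include <kern/slab.h>
#include <vm/vm_kern.h>                 /* kernel_map, kmem_submap() */
#include <vm/vm_map.h>

#define DEVICE_IO_MAP_SIZE	(2 * 1024 * 1024)	/* placeholder value */

static struct vm_map device_io_map_store;
vm_map_t device_io_map = &device_io_map_store;  /* was: vm_map_t device_io_map; */

vm_offset_t device_io_min, device_io_max;

void
device_io_map_init(void)
{
	/*
	 * was: device_io_map = kmem_suballoc(kernel_map,
	 *                                    &device_io_min, &device_io_max,
	 *                                    DEVICE_IO_MAP_SIZE, FALSE);
	 *
	 * kmem_submap() fills in the caller-provided map instead of
	 * allocating one, so setting up the submap no longer depends on
	 * the allocator handing out a struct vm_map first.
	 */
	kmem_submap(device_io_map, kernel_map, &device_io_min,
		    &device_io_max, DEVICE_IO_MAP_SIZE, FALSE);

	/*
	 * Keep waiting for space rather than failing when many
	 * device_write requests temporarily exhaust the submap
	 * (behaviour unchanged from the original code).
	 */
	device_io_map->wait_for_space = TRUE;
}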