Diffstat (limited to 'kern')
-rw-r--r--  kern/act.c         | 17
-rw-r--r--  kern/kalloc.h      |  4
-rw-r--r--  kern/mach_clock.c  |  2
-rw-r--r--  kern/processor.c   | 14
-rw-r--r--  kern/startup.c     |  1
-rw-r--r--  kern/task.c        | 15
-rw-r--r--  kern/thread.c      | 15
7 files changed, 29 insertions(+), 39 deletions(-)
diff --git a/kern/act.c b/kern/act.c
index d0a03a3..f4f1e31 100644
--- a/kern/act.c
+++ b/kern/act.c
@@ -31,7 +31,7 @@
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <kern/mach_param.h> /* XXX INCALL_... */
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
@@ -47,7 +47,7 @@ static void special_handler(ReturnHandler *rh, struct Act *act);
#endif
#ifndef ACT_STATIC_KLUDGE
-static zone_t act_zone;
+static struct kmem_cache act_cache;
#else
static Act *act_freelist;
static Act free_acts[ACT_STATIC_KLUDGE];
@@ -68,11 +68,8 @@ void
global_act_init()
{
#ifndef ACT_STATIC_KLUDGE
- act_zone = zinit(
- sizeof(struct Act), 0,
- ACT_MAX * sizeof(struct Act), /* XXX */
- ACT_CHUNK * sizeof(struct Act),
- 0, "activations");
+ kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
+ NULL, NULL, NULL, 0);
#else
int i;
@@ -104,7 +101,7 @@ kern_return_t act_create(task_t task, vm_offset_t user_stack,
int rc;
#ifndef ACT_STATIC_KLUDGE
- act = (Act*)zalloc(act_zone);
+ act = (Act*)kmem_cache_alloc(&act_cache);
if (act == 0)
return(KERN_RESOURCE_SHORTAGE);
#else
@@ -170,9 +167,9 @@ static void act_free(Act *inc)
/* Drop the task reference. */
task_deallocate(inc->task);
- /* Put the act back on the act zone */
+ /* Put the act back on the act cache */
#ifndef ACT_STATIC_KLUDGE
- zfree(act_zone, (vm_offset_t)inc);
+ kmem_cache_free(&act_cache, (vm_offset_t)inc);
#else
/* XXX ipt_lock(act_freelist); */
inc->ipt_next = act_freelist;
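
The act.c hunks above show the conversion pattern this patch repeats across kern/: the zone_t pointer becomes an embedded struct kmem_cache, zinit() becomes kmem_cache_init(), and zalloc()/zfree() become kmem_cache_alloc()/kmem_cache_free(). A minimal sketch of that pattern for a hypothetical "struct foo" follows; it assumes only the eight-argument kmem_cache_init() call visible in this patch (cache, name, object size, alignment, three hook slots left NULL, flags), and foo itself is illustrative, not part of the change.

    #include <kern/slab.h>

    struct foo {
            int foo_refcount;
            /* ... */
    };

    /* Embedded cache descriptor, replacing what used to be a zone_t. */
    static struct kmem_cache foo_cache;

    void foo_module_init(void)
    {
            /* No max/chunk sizes as with zinit(); sizing is left to the
               slab layer.  The NULLs and trailing 0 mirror the calls above. */
            kmem_cache_init(&foo_cache, "foo", sizeof(struct foo), 0,
                            NULL, NULL, NULL, 0);
    }

    struct foo *foo_create(void)
    {
            struct foo *f = (struct foo *) kmem_cache_alloc(&foo_cache);

            if (f == 0)
                    return 0;       /* callers map this to KERN_RESOURCE_SHORTAGE */
            f->foo_refcount = 1;
            return f;
    }

    void foo_destroy(struct foo *f)
    {
            kmem_cache_free(&foo_cache, (vm_offset_t) f);
    }

Two differences from the zone interface stand out in the hunks above: the cache descriptor lives in the client module instead of being returned as an opaque handle, and the ACT_MAX/ACT_CHUNK sizing arguments that zinit() took have no counterpart in kmem_cache_init().
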
diff --git a/kern/kalloc.h b/kern/kalloc.h
index a80f6db..1330b54 100644
--- a/kern/kalloc.h
+++ b/kern/kalloc.h
@@ -28,11 +28,11 @@
#define _KERN_KALLOC_H_
#include <mach/machine/vm_types.h>
+#include <vm/vm_types.h>
-#define MINSIZE 16
+extern vm_map_t kalloc_map;
extern vm_offset_t kalloc (vm_size_t size);
-extern vm_offset_t kget (vm_size_t size);
extern void kfree (vm_offset_t data, vm_size_t size);
extern void kalloc_init (void);
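
The kalloc.h hunk drops the zone-era MINSIZE constant and the kget() variant, leaving kalloc()/kfree() (and exporting kalloc_map). One property worth noting is visible in the prototypes above: kfree() takes the size of the original allocation, so callers must carry it along. A small illustrative caller (the function name is made up for the example):

    #include <kern/kalloc.h>
    #include <mach/kern_return.h>

    /* Allocate a variable-sized scratch buffer, use it, and release it.
       kfree() must be passed the same size that was given to kalloc(). */
    kern_return_t with_scratch_buffer(vm_size_t size)
    {
            vm_offset_t buf;

            buf = kalloc(size);
            if (buf == 0)
                    return KERN_RESOURCE_SHORTAGE;

            /* ... use the buffer ... */

            kfree(buf, size);
            return KERN_SUCCESS;
    }
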
diff --git a/kern/mach_clock.c b/kern/mach_clock.c
index 4ba7c08..050f088 100644
--- a/kern/mach_clock.c
+++ b/kern/mach_clock.c
@@ -514,7 +514,7 @@ int timeclose()
/*
* Compatibility for device drivers.
* New code should use set_timeout/reset_timeout and private timers.
- * These code can't use a zone to allocate timers, because
+ * This code can't use a cache to allocate timers, because
* it can be called from interrupt handlers.
*/
diff --git a/kern/processor.c b/kern/processor.c
index 718ff3a..3ece341 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -46,8 +46,8 @@
#include <ipc/ipc_port.h>
#if MACH_HOST
-#include <kern/zalloc.h>
-zone_t pset_zone;
+#include <kern/slab.h>
+struct kmem_cache pset_cache;
#endif /* MACH_HOST */
@@ -112,10 +112,10 @@ void pset_sys_init(void)
register processor_t processor;
/*
- * Allocate the zone for processor sets.
+ * Allocate the cache for processor sets.
*/
- pset_zone = zinit(sizeof(struct processor_set), 0, 128*PAGE_SIZE,
- PAGE_SIZE, 0, "processor sets");
+ kmem_cache_init(&pset_cache, "processor_set",
+ sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
/*
* Give each processor a control port.
@@ -394,7 +394,7 @@ void pset_deallocate(
/*
* That's it, free data structure.
*/
- zfree(pset_zone, (vm_offset_t)pset);
+ kmem_cache_free(&pset_cache, (vm_offset_t)pset);
#endif /* MACH_HOST */
}
@@ -538,7 +538,7 @@ processor_set_create(
if (host == HOST_NULL)
return KERN_INVALID_ARGUMENT;
- pset = (processor_set_t) zalloc(pset_zone);
+ pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
pset_init(pset);
pset_reference(pset); /* for new_set out argument */
pset_reference(pset); /* for new_name out argument */
diff --git a/kern/startup.c b/kern/startup.c
index a4b5a6f..3bdda16 100644
--- a/kern/startup.c
+++ b/kern/startup.c
@@ -48,7 +48,6 @@
#include <kern/timer.h>
#include <kern/xpr.h>
#include <kern/time_stamp.h>
-#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
diff --git a/kern/task.c b/kern/task.c
index 88da16e..2e9fd76 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -43,7 +43,7 @@
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h> /* for thread_wakeup */
@@ -52,7 +52,7 @@
#include <machine/machspl.h> /* for splsched */
task_t kernel_task = TASK_NULL;
-zone_t task_zone;
+struct kmem_cache task_cache;
extern void eml_init(void);
extern void eml_task_reference(task_t, task_t);
@@ -60,11 +60,8 @@ extern void eml_task_deallocate(task_t);
void task_init(void)
{
- task_zone = zinit(
- sizeof(struct task), 0,
- TASK_MAX * sizeof(struct task),
- TASK_CHUNK * sizeof(struct task),
- 0, "tasks");
+ kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
+ NULL, NULL, NULL, 0);
eml_init();
machine_task_module_init ();
@@ -120,7 +117,7 @@ kern_return_t task_create(
int i;
#endif
- new_task = (task_t) zalloc(task_zone);
+ new_task = (task_t) kmem_cache_alloc(&task_cache);
if (new_task == TASK_NULL) {
panic("task_create: no memory for task structure");
}
@@ -235,7 +232,7 @@ void task_deallocate(
pset_deallocate(pset);
vm_map_deallocate(task->map);
is_release(task->itk_space);
- zfree(task_zone, (vm_offset_t) task);
+ kmem_cache_free(&task_cache, (vm_offset_t) task);
}
void task_reference(
diff --git a/kern/thread.c b/kern/thread.c
index bf2df94..f23af58 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -54,7 +54,7 @@
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/host.h>
-#include <kern/zalloc.h>
+#include <kern/slab.h>
#include <kern/mach_clock.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_kmsg.h>
@@ -67,7 +67,7 @@
thread_t active_threads[NCPUS];
vm_offset_t active_stacks[NCPUS];
-struct zone *thread_zone;
+struct kmem_cache thread_cache;
queue_head_t reaper_queue;
decl_simple_lock_data(, reaper_lock)
@@ -300,11 +300,8 @@ void stack_privilege(
void thread_init(void)
{
- thread_zone = zinit(
- sizeof(struct thread), 0,
- THREAD_MAX * sizeof(struct thread),
- THREAD_CHUNK * sizeof(struct thread),
- 0, "threads");
+ kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
+ NULL, NULL, NULL, 0);
/*
* Fill in a template thread for fast initialization.
@@ -414,7 +411,7 @@ kern_return_t thread_create(
* Allocate a thread and initialize static fields
*/
- new_thread = (thread_t) zalloc(thread_zone);
+ new_thread = (thread_t) kmem_cache_alloc(&thread_cache);
if (new_thread == THREAD_NULL)
return KERN_RESOURCE_SHORTAGE;
@@ -710,7 +707,7 @@ void thread_deallocate(
evc_notify_abort(thread);
pcb_terminate(thread);
- zfree(thread_zone, (vm_offset_t) thread);
+ kmem_cache_free(&thread_cache, (vm_offset_t) thread);
}
void thread_reference(
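
Every kmem_cache_init() call introduced by this patch passes NULL for the three hook arguments and 0 for alignment and flags. Assuming those slots follow the usual slab-allocator layout (an object constructor followed by custom slab alloc/free functions), a cache that pre-initializes its objects might look roughly like the sketch below; the constructor convention (a single pointer to the raw object) and the widget type are assumptions for illustration, not something this patch establishes.

    #include <kern/slab.h>

    struct widget {
            int w_state;
            /* ... fields whose setup can be hoisted into the constructor ... */
    };

    static struct kmem_cache widget_cache;

    /* Assumed constructor hook: run when a slab is populated, so the
       setup cost is paid once per object rather than on every allocation. */
    static void widget_ctor(void *obj)
    {
            struct widget *w = (struct widget *) obj;

            w->w_state = 0;
    }

    void widget_module_init(void)
    {
            /* Same argument order as the calls in this patch, with the
               first hook slot filled in instead of NULL. */
            kmem_cache_init(&widget_cache, "widget", sizeof(struct widget), 0,
                            widget_ctor, NULL, NULL, 0);
    }

The two remaining NULLs (custom slab alloc/free hooks) and the flags word stay at their defaults here, as they do everywhere in this patch.
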