diff options
Diffstat (limited to 'i386')
-rw-r--r-- | i386/i386/fpu.c | 25 | ||||
-rw-r--r-- | i386/i386/io_perm.c | 6 | ||||
-rw-r--r-- | i386/i386/machine_task.c | 19 | ||||
-rw-r--r-- | i386/i386/pcb.c | 13 | ||||
-rw-r--r-- | i386/i386/task.h | 4 | ||||
-rw-r--r-- | i386/intel/pmap.c | 30 | ||||
-rw-r--r-- | i386/intel/pmap.h | 1 |
7 files changed, 47 insertions, 51 deletions
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c index 2626a38..75bf655 100644 --- a/i386/i386/fpu.c +++ b/i386/i386/fpu.c @@ -47,7 +47,7 @@ #include <kern/mach_param.h> #include <kern/printf.h> #include <kern/thread.h> -#include <kern/zalloc.h> +#include <kern/slab.h> #include <i386/thread.h> #include <i386/fpu.h> @@ -72,7 +72,7 @@ extern void i386_exception(); int fp_kind = FP_387; /* 80387 present */ -zone_t ifps_zone; /* zone for FPU save area */ +struct kmem_cache ifps_cache; /* cache for FPU save area */ static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */ void fp_save(thread_t thread); @@ -193,10 +193,9 @@ init_fpu() void fpu_module_init() { - ifps_zone = zinit(sizeof(struct i386_fpsave_state), 16, - THREAD_MAX * sizeof(struct i386_fpsave_state), - THREAD_CHUNK * sizeof(struct i386_fpsave_state), - 0, "i386 fpsave state"); + kmem_cache_init(&ifps_cache, "i386_fpsave_state", + sizeof(struct i386_fpsave_state), 16, + NULL, NULL, NULL, 0); } /* @@ -221,7 +220,7 @@ ASSERT_IPL(SPL0); clear_fpu(); } #endif /* NCPUS == 1 */ - zfree(ifps_zone, (vm_offset_t) fps); + kmem_cache_free(&ifps_cache, (vm_offset_t) fps); } /* The two following functions were stolen from Linux's i387.c */ @@ -335,7 +334,7 @@ ASSERT_IPL(SPL0); simple_unlock(&pcb->lock); if (ifps != 0) { - zfree(ifps_zone, (vm_offset_t) ifps); + kmem_cache_free(&ifps_cache, (vm_offset_t) ifps); } } else { @@ -356,7 +355,7 @@ ASSERT_IPL(SPL0); if (ifps == 0) { if (new_ifps == 0) { simple_unlock(&pcb->lock); - new_ifps = (struct i386_fpsave_state *) zalloc(ifps_zone); + new_ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache); goto Retry; } ifps = new_ifps; @@ -396,7 +395,7 @@ ASSERT_IPL(SPL0); simple_unlock(&pcb->lock); if (new_ifps != 0) - zfree(ifps_zone, (vm_offset_t) new_ifps); + kmem_cache_free(&ifps_cache, (vm_offset_t) new_ifps); } return KERN_SUCCESS; @@ -609,7 +608,7 @@ fpextovrflt() clear_fpu(); if (ifps) - zfree(ifps_zone, 
(vm_offset_t) ifps); + kmem_cache_free(&ifps_cache, (vm_offset_t) ifps); /* * Raise exception. @@ -785,7 +784,7 @@ fp_load(thread) ASSERT_IPL(SPL0); ifps = pcb->ims.ifps; if (ifps == 0) { - ifps = (struct i386_fpsave_state *) zalloc(ifps_zone); + ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache); memset(ifps, 0, sizeof *ifps); pcb->ims.ifps = ifps; fpinit(); @@ -836,7 +835,7 @@ fp_state_alloc() pcb_t pcb = current_thread()->pcb; struct i386_fpsave_state *ifps; - ifps = (struct i386_fpsave_state *)zalloc(ifps_zone); + ifps = (struct i386_fpsave_state *)kmem_cache_alloc(&ifps_cache); memset(ifps, 0, sizeof *ifps); pcb->ims.ifps = ifps; diff --git a/i386/i386/io_perm.c b/i386/i386/io_perm.c index df25cc6..7dcb858 100644 --- a/i386/i386/io_perm.c +++ b/i386/i386/io_perm.c @@ -54,7 +54,7 @@ #include <ipc/ipc_port.h> #include <ipc/ipc_space.h> -#include <kern/zalloc.h> +#include <kern/slab.h> #include <kern/lock.h> #include <kern/queue.h> #include <kern/thread.h> @@ -257,12 +257,12 @@ i386_io_perm_modify (task_t target_task, io_perm_t io_perm, boolean_t enable) if (!iopb) { simple_unlock (&target_task->machine.iopb_lock); - iopb = (unsigned char *) zalloc (machine_task_iopb_zone); + iopb = (unsigned char *) kmem_cache_alloc (&machine_task_iopb_cache); simple_lock (&target_task->machine.iopb_lock); if (target_task->machine.iopb) { if (iopb) - zfree (machine_task_iopb_zone, (vm_offset_t) iopb); + kmem_cache_free (&machine_task_iopb_cache, (vm_offset_t) iopb); iopb = target_task->machine.iopb; iopb_size = target_task->machine.iopb_size; } diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c index 35b89e0..689bf04 100644 --- a/i386/i386/machine_task.c +++ b/i386/i386/machine_task.c @@ -22,15 +22,15 @@ #include <kern/lock.h> #include <mach/mach_types.h> -#include <kern/zalloc.h> +#include <kern/slab.h> #include <kern/mach_param.h> #include <machine/task.h> #include <machine/io_perm.h> -/* The zone which holds our IO permission bitmaps. 
*/ -zone_t machine_task_iopb_zone; +/* The cache which holds our IO permission bitmaps. */ +struct kmem_cache machine_task_iopb_cache; /* Initialize the machine task module. The function is called once at @@ -38,11 +38,8 @@ zone_t machine_task_iopb_zone; void machine_task_module_init (void) { - machine_task_iopb_zone = zinit (IOPB_BYTES, 0, - TASK_MAX * IOPB_BYTES, - IOPB_BYTES, - ZONE_COLLECTABLE | ZONE_EXHAUSTIBLE, - "i386 machine task iopb"); + kmem_cache_init (&machine_task_iopb_cache, "i386_task_iopb", IOPB_BYTES, 0, + NULL, NULL, NULL, 0); } @@ -62,7 +59,8 @@ void machine_task_terminate (task_t task) { if (task->machine.iopb) - zfree (machine_task_iopb_zone, (vm_offset_t) task->machine.iopb); + kmem_cache_free (&machine_task_iopb_cache, + (vm_offset_t) task->machine.iopb); } @@ -74,7 +72,8 @@ machine_task_collect (task_t task) simple_lock (&task->machine.iopb_lock); if (task->machine.iopb_size == 0 && task->machine.iopb) { - zfree (machine_task_iopb_zone, (vm_offset_t) task->machine.iopb); + kmem_cache_free (&machine_task_iopb_cache, + (vm_offset_t) task->machine.iopb); task->machine.iopb = 0; } simple_unlock (&task->machine.iopb_lock); diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c index fffa92a..11ef5e7 100644 --- a/i386/i386/pcb.c +++ b/i386/i386/pcb.c @@ -39,6 +39,7 @@ #include <kern/mach_param.h> #include <kern/thread.h> #include <kern/sched_prim.h> +#include <kern/slab.h> #include <vm/vm_kern.h> #include <vm/pmap.h> @@ -65,7 +66,7 @@ extern void Thread_continue(); extern void user_ldt_free(); -zone_t pcb_zone; +struct kmem_cache pcb_cache; vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */ @@ -369,10 +370,8 @@ thread_t switch_context(old, continuation, new) void pcb_module_init() { - pcb_zone = zinit(sizeof(struct pcb), 0, - THREAD_MAX * sizeof(struct pcb), - THREAD_CHUNK * sizeof(struct pcb), - 0, "i386 pcb state"); + kmem_cache_init(&pcb_cache, "pcb", sizeof(struct pcb), 0, + NULL, NULL, NULL, 0); fpu_module_init(); } @@ -382,7 +381,7 @@ void 
pcb_init(thread) { register pcb_t pcb; - pcb = (pcb_t) zalloc(pcb_zone); + pcb = (pcb_t) kmem_cache_alloc(&pcb_cache); if (pcb == 0) panic("pcb_init"); @@ -422,7 +421,7 @@ void pcb_terminate(thread) fp_free(pcb->ims.ifps); if (pcb->ims.ldt != 0) user_ldt_free(pcb->ims.ldt); - zfree(pcb_zone, (vm_offset_t) pcb); + kmem_cache_free(&pcb_cache, (vm_offset_t) pcb); thread->pcb = 0; } diff --git a/i386/i386/task.h b/i386/i386/task.h index ca8de04..0060ad4 100644 --- a/i386/i386/task.h +++ b/i386/i386/task.h @@ -24,7 +24,7 @@ #define _I386_TASK_H_ #include <kern/kern_types.h> -#include <kern/zalloc.h> +#include <kern/slab.h> /* The machine specific data of a task. */ struct machine_task @@ -41,7 +41,7 @@ struct machine_task typedef struct machine_task machine_task_t; -extern zone_t machine_task_iopb_zone; +extern struct kmem_cache machine_task_iopb_cache; /* Initialize the machine task module. The function is called once at start up by task_init in kern/task.c. */ diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c index 9f34f2d..94d2e9c 100644 --- a/i386/intel/pmap.c +++ b/i386/intel/pmap.c @@ -63,7 +63,7 @@ #include <kern/debug.h> #include <kern/printf.h> #include <kern/thread.h> -#include <kern/zalloc.h> +#include <kern/slab.h> #include <kern/lock.h> @@ -113,7 +113,7 @@ pv_entry_t pv_head_table; /* array of entries, one per page */ /* * pv_list entries are kept on a list that can only be accessed * with the pmap system locked (at SPLVM, not in the cpus_active set). - * The list is refilled from the pv_list_zone if it becomes empty. + * The list is refilled from the pv_list_cache if it becomes empty. 
*/ pv_entry_t pv_free_list; /* free list at SPLVM */ decl_simple_lock_data(, pv_free_list_lock) @@ -133,7 +133,7 @@ decl_simple_lock_data(, pv_free_list_lock) simple_unlock(&pv_free_list_lock); \ } -zone_t pv_list_zone; /* zone of pv_entry structures */ +struct kmem_cache pv_list_cache; /* cache of pv_entry structures */ /* * Each entry in the pv_head_table is locked by a bit in the @@ -400,7 +400,7 @@ struct pmap_update_list cpu_update_list[NCPUS]; struct pmap kernel_pmap_store; pmap_t kernel_pmap; -struct zone *pmap_zone; /* zone of pmap structures */ +struct kmem_cache pmap_cache; /* cache of pmap structures */ int pmap_debug = 0; /* flag for debugging prints */ @@ -937,13 +937,13 @@ void pmap_init() pmap_phys_attributes = (char *) addr; /* - * Create the zone of physical maps, + * Create the cache of physical maps, * and of the physical-to-virtual entries. */ s = (vm_size_t) sizeof(struct pmap); - pmap_zone = zinit(s, 0, 400*s, 4096, 0, "pmap"); /* XXX */ + kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, NULL, NULL, 0); s = (vm_size_t) sizeof(struct pv_entry); - pv_list_zone = zinit(s, 0, 10000*s, 4096, 0, "pv_list"); /* XXX */ + kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, NULL, NULL, 0); #if NCPUS > 1 /* @@ -1009,7 +1009,7 @@ pmap_page_table_page_alloc() /* * We cannot allocate the pmap_object in pmap_init, - * because it is called before the zone package is up. + * because it is called before the cache package is up. * Allocate it now if it is missing. */ if (pmap_object == VM_OBJECT_NULL) @@ -1113,11 +1113,11 @@ pmap_t pmap_create(size) } /* - * Allocate a pmap struct from the pmap_zone. Then allocate - * the page descriptor table from the pd_zone. + * Allocate a pmap struct from the pmap_cache. Then allocate + * the page descriptor table. 
 */ - p = (pmap_t) zalloc(pmap_zone); + p = (pmap_t) kmem_cache_alloc(&pmap_cache); if (p == PMAP_NULL) panic("pmap_create"); @@ -1232,7 +1232,7 @@ void pmap_destroy(p) #endif /* MACH_XEN */ kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES); #endif /* PAE */ - zfree(pmap_zone, (vm_offset_t) p); + kmem_cache_free(&pmap_cache, (vm_offset_t) p); } /* @@ -1782,7 +1782,7 @@ if (pmap_debug) printf("pmap(%x, %x)\n", v, pa); /* * Must allocate a new pvlist entry while we're unlocked; - * zalloc may cause pageout (which will lock the pmap system). + * Allocating may cause pageout (which will lock the pmap system). * If we determine we need a pvlist entry, we will unlock * and allocate one. Then we will retry, throwing away * the allocated entry later (if we no longer need it). */ @@ -1966,9 +1966,9 @@ Retry: PMAP_READ_UNLOCK(pmap, spl); /* - * Refill from zone. + * Refill from cache. */ - pv_e = (pv_entry_t) zalloc(pv_list_zone); + pv_e = (pv_entry_t) kmem_cache_alloc(&pv_list_cache); goto Retry; } } diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h index 7ba7d2c..e02ad36 100644 --- a/i386/intel/pmap.h +++ b/i386/intel/pmap.h @@ -37,7 +37,6 @@ #ifndef __ASSEMBLER__ -#include <kern/zalloc.h> #include <kern/lock.h> #include <mach/machine/vm_param.h> #include <mach/vm_statistics.h> |