author     Samuel Thibault <samuel.thibault@ens-lyon.org>    2007-01-04 23:51:02 +0000
committer  Thomas Schwinge <tschwinge@gnu.org>               2009-06-18 00:27:00 +0200
commit     818dca5f6e64e5db7ff40c860a17b8a6bbe1af30 (patch)
tree       51d50599f61b7e810dd99cc91b369ced423e2cb6 /kern
parent     f9107bcab72087acb32ab2800be04b6506b0780a (diff)
2006-12-30 Richard Braun <syn@hurdfr.org>
        Add alignment support in the zone allocator.

        * kern/zalloc.c (ALIGN_SIZE_UP): New macro.
        (zinit): New `align' parameter.
        (zget_space): Likewise.
        (zalloc): Updated call to zget_space() with the zone alignment.
        * kern/zalloc.h (zone): New member `align'.
        (zinit): Declaration updated as required.
        * device/dev_lookup.c (dev_lookup_init): Updated call to zinit()
        with alignment of 0.
        * device/dev_pager.c (dev_pager_hash_init): Likewise.
        (device_pager_init): Likewise.
        * device/ds_routines.c (ds_init): Likewise.
        (ds_trap_init): Likewise.
        * device/net_io.c (net_io_init): Likewise.
        * i386/i386/fpu.c (fpu_module_init): Likewise.
        * i386/i386/pcb.c (pcb_module_init): Likewise.
        * i386/intel/pmap.c (pmap_init): Likewise.
        * ipc/ipc_init.c (ipc_bootstrap): Likewise.
        * ipc/ipc_marequest.c (ipc_marequest_init): Likewise.
        * kern/act.c (global_act_init): Likewise.
        * kern/kalloc.c (kalloc_init): Likewise.
        * kern/processor.c (pset_sys_init): Likewise.
        * kern/task.c (task_init): Likewise.
        * kern/thread.c (thread_init): Likewise.
        * kern/zalloc.c (zone_bootstrap): Likewise.
        * vm/vm_external.c (vm_external_module_initialize): Likewise.
        * vm/vm_fault.c (vm_fault_init): Likewise.
        * vm/vm_map.c (vm_map_init): Likewise.
        * vm/vm_object.c (vm_object_bootstrap): Likewise.
        * vm/vm_resident.c (vm_page_module_init): Likewise.
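For reference, here is a minimal standalone sketch of the rounding performed by the new ALIGN_SIZE_UP macro. The macro body is copied verbatim from the kern/zalloc.c hunk below; the main() driver and the sample values are illustrative only, and the macro assumes a power-of-two alignment.

/* Standalone illustration of ALIGN_SIZE_UP from kern/zalloc.c.
 * main() and the sample values are not part of the patch. */
#include <stdio.h>

#define ALIGN_SIZE_UP(size, align) \
        ((size) = (((size) + ((align) - 1)) & ~((align) - 1)))

int main(void)
{
        unsigned long size = 52;

        ALIGN_SIZE_UP(size, 16);        /* rounds 52 up to 64 */
        printf("aligned size: %lu\n", size);
        return 0;
}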
Diffstat (limited to 'kern')
-rw-r--r--  kern/act.c        2
-rw-r--r--  kern/kalloc.c     2
-rw-r--r--  kern/processor.c  2
-rw-r--r--  kern/task.c       2
-rw-r--r--  kern/thread.c     2
-rw-r--r--  kern/zalloc.c    29
-rw-r--r--  kern/zalloc.h     5
7 files changed, 30 insertions(+), 14 deletions(-)
diff --git a/kern/act.c b/kern/act.c
index ffbce28..d0a03a3 100644
--- a/kern/act.c
+++ b/kern/act.c
@@ -69,7 +69,7 @@ global_act_init()
{
#ifndef ACT_STATIC_KLUDGE
act_zone = zinit(
- sizeof(struct Act),
+ sizeof(struct Act), 0,
ACT_MAX * sizeof(struct Act), /* XXX */
ACT_CHUNK * sizeof(struct Act),
0, "activations");
diff --git a/kern/kalloc.c b/kern/kalloc.c
index b24eaa3..4460d59 100644
--- a/kern/kalloc.c
+++ b/kern/kalloc.c
@@ -145,7 +145,7 @@ void kalloc_init()
if (size == MINSIZE) {
first_k_zone = i;
}
- k_zone[i] = zinit(size, k_zone_max[i] * size, size,
+ k_zone[i] = zinit(size, 0, k_zone_max[i] * size, size,
size >= PAGE_SIZE ? ZONE_COLLECTABLE : 0,
k_zone_name[i]);
}
diff --git a/kern/processor.c b/kern/processor.c
index 4035ad9..d645051 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -113,7 +113,7 @@ void pset_sys_init(void)
/*
* Allocate the zone for processor sets.
*/
- pset_zone = zinit(sizeof(struct processor_set), 128*PAGE_SIZE,
+ pset_zone = zinit(sizeof(struct processor_set), 0, 128*PAGE_SIZE,
PAGE_SIZE, 0, "processor sets");
/*
diff --git a/kern/task.c b/kern/task.c
index db61522..1c4c673 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -61,7 +61,7 @@ extern void eml_task_deallocate(task_t);
void task_init(void)
{
task_zone = zinit(
- sizeof(struct task),
+ sizeof(struct task), 0,
TASK_MAX * sizeof(struct task),
TASK_CHUNK * sizeof(struct task),
0, "tasks");
diff --git a/kern/thread.c b/kern/thread.c
index 8c08dc4..6ca91a7 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -299,7 +299,7 @@ void stack_privilege(
void thread_init(void)
{
thread_zone = zinit(
- sizeof(struct thread),
+ sizeof(struct thread), 0,
THREAD_MAX * sizeof(struct thread),
THREAD_CHUNK * sizeof(struct thread),
0, "threads");
diff --git a/kern/zalloc.c b/kern/zalloc.c
index dba6190..1f415f7 100644
--- a/kern/zalloc.c
+++ b/kern/zalloc.c
@@ -72,6 +72,9 @@ MACRO_BEGIN \
} \
MACRO_END
+#define ALIGN_SIZE_UP(size, align) \
+((size) = (((size) + ((align) - 1)) & ~((align) - 1)))
+
/*
* Support for garbage collection of unused zone pages:
*/
@@ -146,7 +149,7 @@ MACRO_BEGIN \
} \
MACRO_END
-static vm_offset_t zget_space();
+static vm_offset_t zget_space(vm_offset_t size, vm_size_t align);
decl_simple_lock_data(,zget_space_lock)
vm_offset_t zalloc_next_space;
@@ -182,8 +185,9 @@ int num_zones;
* are stored in a zone, which is initially a static structure that
* is initialized by zone_init.
*/
-zone_t zinit(size, max, alloc, memtype, name)
+zone_t zinit(size, align, max, alloc, memtype, name)
vm_size_t size; /* the size of an element */
+ vm_size_t align; /* alignment of elements */
vm_size_t max; /* maximum memory to use */
vm_size_t alloc; /* allocation size */
unsigned int memtype; /* flags specifying type of memory */
@@ -192,12 +196,11 @@ zone_t zinit(size, max, alloc, memtype, name)
register zone_t z;
if (zone_zone == ZONE_NULL)
- z = (zone_t) zget_space(sizeof(struct zone));
+ z = (zone_t) zget_space(sizeof(struct zone), 0);
else
z = (zone_t) zalloc(zone_zone);
if (z == ZONE_NULL)
panic("zinit");
-
if (alloc == 0)
alloc = PAGE_SIZE;
@@ -210,11 +213,18 @@ zone_t zinit(size, max, alloc, memtype, name)
if ((max = round_page(max)) < (alloc = round_page(alloc)))
max = alloc;
+ if (align > 0) {
+ if (align >= PAGE_SIZE)
+ panic("zinit");
+ ALIGN_SIZE_UP(size, align);
+ }
+
z->free_elements = 0;
z->cur_size = 0;
z->max_size = max;
z->elem_size = ((size-1) + sizeof(z->free_elements)) -
((size-1) % sizeof(z->free_elements));
+ z->align = align;
z->alloc_size = alloc;
z->type = memtype;
@@ -268,13 +278,18 @@ void zcram(zone_t zone, vm_offset_t newmem, vm_size_t size)
* of memory from zone_map.
*/
-static vm_offset_t zget_space(vm_offset_t size)
+static vm_offset_t zget_space(vm_offset_t size, vm_size_t align)
{
vm_offset_t new_space = 0;
vm_offset_t result;
vm_size_t space_to_add = 0; /*'=0' to quiet gcc warnings */
simple_lock(&zget_space_lock);
+ if (align > 0) {
+ assert(align < PAGE_SIZE);
+ ALIGN_SIZE_UP(zalloc_next_space, align);
+ }
+
while ((zalloc_next_space + size) > zalloc_end_of_space) {
/*
* Add at least one page to allocation area.
@@ -359,7 +374,7 @@ void zone_bootstrap()
zalloc_wasted_space = 0;
zone_zone = ZONE_NULL;
- zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
+ zone_zone = zinit(sizeof(struct zone), 0, 128 * sizeof(struct zone),
sizeof(struct zone), 0, "zones");
}
@@ -487,7 +502,7 @@ vm_offset_t zalloc(zone_t zone)
zone_lock(zone);
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
} else {
- addr = zget_space(zone->elem_size);
+ addr = zget_space(zone->elem_size, zone->align);
if (addr == 0)
panic("zalloc: zone %s exhausted",
zone->zone_name);
diff --git a/kern/zalloc.h b/kern/zalloc.h
index 9e44a6d..3f9bf21 100644
--- a/kern/zalloc.h
+++ b/kern/zalloc.h
@@ -59,6 +59,7 @@ struct zone {
vm_size_t cur_size; /* current memory utilization */
vm_size_t max_size; /* how large can this zone grow */
vm_size_t elem_size; /* size of an element */
+ vm_size_t align; /* alignment of elements */
vm_size_t alloc_size; /* size used for more memory */
boolean_t doing_alloc; /* is zone expanding now? */
char *zone_name; /* a name for the zone */
@@ -71,8 +72,8 @@ typedef struct zone *zone_t;
#define ZONE_NULL ((zone_t) 0)
/* Exported to everyone */
-zone_t zinit(vm_size_t size, vm_size_t max, vm_size_t alloc,
- unsigned int memtype, char *name);
+zone_t zinit(vm_size_t size, vm_size_t align, vm_size_t max,
+ vm_size_t alloc, unsigned int memtype, char *name);
vm_offset_t zalloc(zone_t zone);
vm_offset_t zget(zone_t zone);
void zfree(zone_t zone, vm_offset_t elem);
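As a usage sketch (not part of this patch), a caller could now request aligned zone elements by passing a nonzero second argument to zinit(); every caller touched by this change passes 0, keeping the old behaviour. struct foo, foo_zone and the size limits below are hypothetical, and the sketch assumes the usual GNU Mach kernel definitions (zone_t, PAGE_SIZE) are in scope.

/* Hypothetical caller, for illustration only: a zone whose elements
 * are rounded up to a 64-byte boundary via the new `align' argument.
 * `struct foo', `foo_zone' and the limits are made-up names. */
#include <kern/zalloc.h>

struct foo {
        int  field;
        char payload[40];
};

zone_t foo_zone;

void foo_init(void)
{
        /* zinit(size, align, max, alloc, memtype, name); the alignment
         * must be smaller than PAGE_SIZE (zinit() panics otherwise) and
         * should be a power of two for ALIGN_SIZE_UP's mask arithmetic. */
        foo_zone = zinit(sizeof(struct foo), 64,
                         1024 * sizeof(struct foo),  /* max memory for the zone */
                         PAGE_SIZE,                  /* allocation chunk */
                         0, "foo structures");
}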