Diffstat (limited to 'mach-defpager/kalloc.c')
-rw-r--r--  mach-defpager/kalloc.c  |  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/mach-defpager/kalloc.c b/mach-defpager/kalloc.c
index d9b18c02..8efb85f9 100644
--- a/mach-defpager/kalloc.c
+++ b/mach-defpager/kalloc.c
@@ -33,7 +33,7 @@
*/
#include <mach.h>
-#include <cthreads.h> /* for spin locks */
+#include <pthread.h> /* for spin locks */
#include <malloc.h> /* for malloc_hook/free_hook */
#include "wiring.h"
@@ -61,10 +61,10 @@ vm_size_t kalloc_max; /* max before we use vm_allocate */
#define MINSIZE 4 /* minimum allocation size */
struct free_list {
- spin_lock_t lock;
- vm_offset_t head; /* head of free list */
+ pthread_spinlock_t lock;
+ vm_offset_t head; /* head of free list */
#ifdef DEBUG
- int count;
+ int count;
#endif /*DEBUG*/
};
@@ -74,7 +74,7 @@ struct free_list {
2048, 4096, 8192, 16384 */
struct free_list kfree_list[KLIST_MAX];
-spin_lock_t kget_space_lock;
+pthread_spinlock_t kget_space_lock;
vm_offset_t kalloc_next_space = 0;
vm_offset_t kalloc_end_of_space = 0;
@@ -107,10 +107,10 @@ void kalloc_init(void)
kalloc_max = vm_page_size;
for (i = 0; i < KLIST_MAX; i++) {
- spin_lock_init(&kfree_list[i].lock);
+ pthread_spin_init(&kfree_list[i].lock, PTHREAD_PROCESS_PRIVATE);
kfree_list[i].head = 0;
}
- spin_lock_init(&kget_space_lock);
+ pthread_spin_init(&kget_space_lock, PTHREAD_PROCESS_PRIVATE);
/*
* Do not allocate memory at address 0.
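With the locks initialized, the free lists are indexed by allocation size: list i holds blocks of MINSIZE << i bytes, up to the 16384-byte list named in the comment above. A sketch of the size-to-list mapping, assuming kalloc()'s usual round-up-by-doubling (a paraphrase for illustration, not a hunk from this change):

	vm_size_t allocsize = MINSIZE;
	struct free_list *fl = &kfree_list[0];

	/* Round the request up to the next power of two, advancing
	   to the matching free list in the same number of steps.  */
	while (allocsize < size) {
		allocsize <<= 1;
		fl++;
	}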
@@ -128,7 +128,7 @@ vm_offset_t kget_space(vm_offset_t size)
vm_offset_t new_space = 0;
vm_offset_t addr;
- spin_lock(&kget_space_lock);
+ pthread_spin_lock(&kget_space_lock);
while (kalloc_next_space + size > kalloc_end_of_space) {
/*
* Add at least one page to allocation area.
@@ -141,7 +141,7 @@ vm_offset_t kget_space(vm_offset_t size)
* Try to make it contiguous with the last
* allocation area.
*/
- spin_unlock(&kget_space_lock);
+ pthread_spin_unlock(&kget_space_lock);
new_space = kalloc_end_of_space;
if (vm_map(mach_task_self(),
@@ -152,7 +152,7 @@ vm_offset_t kget_space(vm_offset_t size)
return 0;
wire_memory(new_space, space_to_add,
VM_PROT_READ|VM_PROT_WRITE);
- spin_lock(&kget_space_lock);
+ pthread_spin_lock(&kget_space_lock);
continue;
}
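This hunk preserves an important pattern: the lock is released before vm_map() and wire_memory(), which are blocking Mach calls, and retaken afterwards; a spin lock held across an RPC would leave every other allocating thread busy-waiting. Because another thread may grow the area in that window, the while loop re-checks its condition after reacquiring the lock. Schematically:

	pthread_spin_lock (&kget_space_lock);
	while (kalloc_next_space + size > kalloc_end_of_space) {
		/* Never hold a spin lock across a blocking call.  */
		pthread_spin_unlock (&kget_space_lock);
		/* ... vm_map () / wire_memory () may block here ... */
		pthread_spin_lock (&kget_space_lock);
		/* Re-check: another thread may have extended the area.  */
	}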
@@ -177,7 +177,7 @@ vm_offset_t kget_space(vm_offset_t size)
addr = kalloc_next_space;
kalloc_next_space += size;
- spin_unlock(&kget_space_lock);
+ pthread_spin_unlock(&kget_space_lock);
if (new_space != 0)
(void) vm_deallocate(mach_task_self(), new_space, space_to_add);
@@ -214,16 +214,16 @@ void *kalloc(vm_size_t size)
*/
if (allocsize < kalloc_max) {
- spin_lock(&fl->lock);
+ pthread_spin_lock(&fl->lock);
if ((addr = fl->head) != 0) {
fl->head = *(vm_offset_t *)addr;
#ifdef DEBUG
fl->count--;
#endif
- spin_unlock(&fl->lock);
+ pthread_spin_unlock(&fl->lock);
}
else {
- spin_unlock(&fl->lock);
+ pthread_spin_unlock(&fl->lock);
addr = kget_space(allocsize);
}
}
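Each free list is a LIFO chain threaded through the free blocks themselves: the first word of every free block holds the address of the next. The pop above, isolated for clarity (same types and names as the file):

	pthread_spin_lock (&fl->lock);
	if ((addr = fl->head) != 0)
		/* Unlink: the next pointer lives in the block itself.  */
		fl->head = *(vm_offset_t *) addr;
	pthread_spin_unlock (&fl->lock);

If the list is empty, the lock is dropped before falling back to kget_space(), again keeping blocking work outside the spin lock.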
@@ -253,13 +253,13 @@ kfree( void *data,
}
if (freesize < kalloc_max) {
- spin_lock(&fl->lock);
+ pthread_spin_lock(&fl->lock);
*(vm_offset_t *)data = fl->head;
fl->head = (vm_offset_t) data;
#ifdef DEBUG
fl->count++;
#endif
- spin_unlock(&fl->lock);
+ pthread_spin_unlock(&fl->lock);
}
else {
(void) vm_deallocate(mach_task_self(), (vm_offset_t)data, freesize);
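kfree() is the mirror image of the pop in kalloc(): under the same per-list lock, the freed block's first word receives the old head and the block becomes the new head, so small blocks are cached on their list indefinitely rather than unmapped. Isolated for clarity:

	pthread_spin_lock (&fl->lock);
	*(vm_offset_t *) data = fl->head;	/* stash old head in the block */
	fl->head = (vm_offset_t) data;		/* freed block is the new head */
	pthread_spin_unlock (&fl->lock);

Only blocks of kalloc_max bytes or more bypass the lists entirely and are handed straight back to the kernel with vm_deallocate(), as in the else branch above.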