Diffstat (limited to 'vm/vm_page.c')
-rw-r--r--  vm/vm_page.c  152
1 file changed, 109 insertions, 43 deletions
diff --git a/vm/vm_page.c b/vm/vm_page.c
index cc184ca..f2a11e1 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -29,24 +29,27 @@
* The symmetric case is handled likewise.
*/
+#include <string.h>
#include <kern/assert.h>
-#include <kern/init.h>
#include <kern/list.h>
#include <kern/macros.h>
-#include <kern/mutex.h>
-#include <kern/panic.h>
-#include <kern/param.h>
-#include <kern/printk.h>
-#include <kern/sprintf.h>
-#include <kern/stddef.h>
-#include <kern/string.h>
+#include <kern/lock.h>
+#include <kern/printf.h>
#include <kern/thread.h>
-#include <kern/types.h>
-#include <machine/cpu.h>
#include <machine/pmap.h>
-#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
+/* XXX Mach glue. */
+#define CPU_L1_SIZE (1 << CPU_L1_SHIFT)
+#define MAX_CPUS NCPUS
+#define __read_mostly
+#define __initdata
+#define __init
+#define cpu_id() cpu_number()
+#define thread_pin()
+#define thread_unpin()
+#define printk printf
+
/*
* Number of free block lists per segment.
*/
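
For context: the glue above lets the allocator, originally written against
x15 kernel primitives, compile on Mach without source changes. One
consequence is that thread_pin()/thread_unpin() expand to nothing, so the
per-CPU fast path implicitly assumes the caller cannot migrate between
cpu_id() and the matching unlock. A minimal sketch of how a pinned section
reads once these defines are applied (vm_page_cpu_pool_get() appears later
in this file; treating it as a per-CPU lookup is an inference):

    struct vm_page_cpu_pool *pool;

    thread_pin();                       /* expands to nothing on Mach */
    pool = vm_page_cpu_pool_get(seg);   /* cpu_id() -> cpu_number()   */
    simple_lock(&pool->lock);
    /* ... pop or push one page ... */
    simple_unlock(&pool->lock);
    thread_unpin();                     /* expands to nothing on Mach */
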
@@ -73,7 +76,7 @@
* Per-processor cache of pages.
*/
struct vm_page_cpu_pool {
- struct mutex lock;
+ decl_simple_lock_data(,lock)
int size;
int transfer_size;
int nr_pages;
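
The per-processor pools front the buddy lists so that single-page requests
normally touch only a CPU-local lock instead of the shared segment lock. A
sketch of the pool selection this enables, reusing the CPU_L1_SIZE, MAX_CPUS
and cpu_id() glue from above; the array declaration lives outside this hunk,
so its exact shape is an assumption:

    struct vm_page_seg {
        /* ... */
        /* One pool per processor, cache-line aligned so that two CPUs
         * never false-share a pool (hypothetical declaration). */
        struct vm_page_cpu_pool cpu_pools[MAX_CPUS]
            __attribute__((aligned(CPU_L1_SIZE)));
        /* ... */
    };

    static inline struct vm_page_cpu_pool *
    vm_page_cpu_pool_get(struct vm_page_seg *seg)
    {
        return &seg->cpu_pools[cpu_id()];
    }
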
@@ -109,7 +112,7 @@ struct vm_page_seg {
phys_addr_t end;
struct vm_page *pages;
struct vm_page *pages_end;
- struct mutex lock;
+ decl_simple_lock_data(,lock)
struct vm_page_free_list free_lists[VM_PAGE_NR_FREE_LISTS];
unsigned long nr_free_pages;
};
@@ -154,16 +157,66 @@ static struct vm_page_boot_seg vm_page_boot_segs[VM_PAGE_MAX_SEGS] __initdata;
*/
static unsigned int vm_page_segs_size __read_mostly;
+/*
+ * Resident page structures are initialized from
+ * a template (see vm_page_initialize).
+ */
+static struct vm_page vm_page_template =
+ {
+ .type = VM_PAGE_RESERVED,
+ .order = VM_PAGE_ORDER_UNLISTED,
+ .object = VM_OBJECT_NULL, /* reset later */
+ .offset = 0, /* reset later */
+ .wire_count = 0,
+
+ .inactive = FALSE,
+ .active = FALSE,
+ .laundry = FALSE,
+ .external = FALSE,
+
+ .busy = TRUE,
+ .wanted = FALSE,
+ .tabled = FALSE,
+ .fictitious = FALSE,
+ .private = FALSE,
+ .absent = FALSE,
+ .error = FALSE,
+ .dirty = FALSE,
+ .precious = FALSE,
+ .reference = FALSE,
+
+ .phys_addr = 0, /* reset later */
+
+ .page_lock = VM_PROT_NONE,
+ .unlock_request = VM_PROT_NONE,
+ };
+
+
static void __init
-vm_page_init(struct vm_page *page, unsigned short seg_index, phys_addr_t pa)
+vm_page_initialize(struct vm_page *page, unsigned short seg_index,
+ phys_addr_t pa)
{
- memset(page, 0, sizeof(*page));
- page->type = VM_PAGE_RESERVED;
+ memcpy(page, &vm_page_template, VM_PAGE_HEADER_SIZE);
page->seg_index = seg_index;
- page->order = VM_PAGE_ORDER_UNLISTED;
page->phys_addr = pa;
}
+/* XXX legacy Mach interface. */
+void
+vm_page_init_mach(struct vm_page *page)
+{
+ memcpy(&page->vm_page_header,
+ &vm_page_template.vm_page_header,
+ sizeof *page - VM_PAGE_HEADER_SIZE);
+}
+
+void
+vm_page_init(vm_page_t mem,
+ vm_offset_t phys_addr)
+{
+ vm_page_initialize(mem, mem->seg_index, phys_addr);
+}
+
void
vm_page_set_type(struct vm_page *page, unsigned int order, unsigned short type)
{
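
The template replaces the old memset-plus-field-assignments: initializing a
page becomes a single memcpy instead of some twenty individual stores. The
two memcpys split struct vm_page at the vm_page_header member, with
vm_page_initialize() copying the allocator-private head and
vm_page_init_mach() the Mach tail. That is only sound if
VM_PAGE_HEADER_SIZE equals the offset of vm_page_header; a compile-time
check of this layout assumption could look like the following (C11,
hypothetical, not part of the patch):

    #include <stddef.h>     /* offsetof */

    _Static_assert(offsetof(struct vm_page, vm_page_header)
                   == VM_PAGE_HEADER_SIZE,
                   "vm_page template split does not match the layout");
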
@@ -278,7 +331,7 @@ vm_page_seg_free_to_buddy(struct vm_page_seg *seg, struct vm_page *page,
static void __init
vm_page_cpu_pool_init(struct vm_page_cpu_pool *cpu_pool, int size)
{
- mutex_init(&cpu_pool->lock);
+ simple_lock_init(&cpu_pool->lock);
cpu_pool->size = size;
cpu_pool->transfer_size = (size + VM_PAGE_CPU_POOL_TRANSFER_RATIO - 1)
/ VM_PAGE_CPU_POOL_TRANSFER_RATIO;
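
The transfer size is the pool size divided by
VM_PAGE_CPU_POOL_TRANSFER_RATIO, rounded up, so each fill or drain moves a
fixed fraction of the pool in one batch. A worked example with assumed
values (the actual pool size and ratio are set elsewhere and not visible in
this patch):

    /* With size = 64 and VM_PAGE_CPU_POOL_TRANSFER_RATIO = 2:
     *   transfer_size = (64 + 2 - 1) / 2 = 32,
     * i.e. one seg->lock acquisition is amortized over 32 pages on
     * every refill or drain of the per-CPU pool. */
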
@@ -321,7 +374,7 @@ vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
assert(cpu_pool->nr_pages == 0);
- mutex_lock(&seg->lock);
+ simple_lock(&seg->lock);
for (i = 0; i < cpu_pool->transfer_size; i++) {
page = vm_page_seg_alloc_from_buddy(seg, 0);
@@ -332,7 +385,7 @@ vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
vm_page_cpu_pool_push(cpu_pool, page);
}
- mutex_unlock(&seg->lock);
+ simple_unlock(&seg->lock);
return i;
}
@@ -346,14 +399,14 @@ vm_page_cpu_pool_drain(struct vm_page_cpu_pool *cpu_pool,
assert(cpu_pool->nr_pages == cpu_pool->size);
- mutex_lock(&seg->lock);
+ simple_lock(&seg->lock);
for (i = cpu_pool->transfer_size; i > 0; i--) {
page = vm_page_cpu_pool_pop(cpu_pool);
vm_page_seg_free_to_buddy(seg, page, 0);
}
- mutex_unlock(&seg->lock);
+ simple_unlock(&seg->lock);
}
static phys_addr_t __init
@@ -394,7 +447,7 @@ vm_page_seg_init(struct vm_page_seg *seg, phys_addr_t start, phys_addr_t end,
seg->pages = pages;
seg->pages_end = pages + vm_page_atop(vm_page_seg_size(seg));
- mutex_init(&seg->lock);
+ simple_lock_init(&seg->lock);
for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++)
vm_page_free_list_init(&seg->free_lists[i]);
@@ -403,7 +456,7 @@ vm_page_seg_init(struct vm_page_seg *seg, phys_addr_t start, phys_addr_t end,
i = seg - vm_page_segs;
for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE)
- vm_page_init(&pages[vm_page_atop(pa - seg->start)], i, pa);
+ vm_page_initialize(&pages[vm_page_atop(pa - seg->start)], i, pa);
}
static struct vm_page *
@@ -419,29 +472,30 @@ vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
if (order == 0) {
thread_pin();
cpu_pool = vm_page_cpu_pool_get(seg);
- mutex_lock(&cpu_pool->lock);
+ simple_lock(&cpu_pool->lock);
if (cpu_pool->nr_pages == 0) {
filled = vm_page_cpu_pool_fill(cpu_pool, seg);
if (!filled) {
- mutex_unlock(&cpu_pool->lock);
+ simple_unlock(&cpu_pool->lock);
thread_unpin();
return NULL;
}
}
page = vm_page_cpu_pool_pop(cpu_pool);
- mutex_unlock(&cpu_pool->lock);
+ simple_unlock(&cpu_pool->lock);
thread_unpin();
} else {
- mutex_lock(&seg->lock);
+ simple_lock(&seg->lock);
page = vm_page_seg_alloc_from_buddy(seg, order);
- mutex_unlock(&seg->lock);
+ simple_unlock(&seg->lock);
}
- assert(page->type == VM_PAGE_FREE);
+ assert(page->type == VM_PAGE_UNUSED);
vm_page_set_type(page, order, type);
+ update_vm_page_counts();
return page;
}
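
Besides the lock conversion, two things change in the allocation path: the
free type becomes VM_PAGE_UNUSED, presumably because Mach's vm/vm_page.h
already defines a VM_PAGE_FREE() macro (an inference, the patch does not
state the reason), and allocations now refresh the global free-page count.
A small usage sketch of the transition the asserts enforce, with
VM_PAGE_OBJECT standing in for whatever type constant a caller would pass:

    struct vm_page *page;

    /* A page leaves the pool/buddy layer typed VM_PAGE_UNUSED and is
     * retyped to the caller's request before being returned. */
    page = vm_page_seg_alloc(seg, 0, VM_PAGE_OBJECT);

    if (page != NULL)
        assert(page->type == VM_PAGE_OBJECT);
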
@@ -451,27 +505,28 @@ vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
{
struct vm_page_cpu_pool *cpu_pool;
- assert(page->type != VM_PAGE_FREE);
+ assert(page->type != VM_PAGE_UNUSED);
assert(order < VM_PAGE_NR_FREE_LISTS);
- vm_page_set_type(page, order, VM_PAGE_FREE);
+ vm_page_set_type(page, order, VM_PAGE_UNUSED);
if (order == 0) {
thread_pin();
cpu_pool = vm_page_cpu_pool_get(seg);
- mutex_lock(&cpu_pool->lock);
+ simple_lock(&cpu_pool->lock);
if (cpu_pool->nr_pages == cpu_pool->size)
vm_page_cpu_pool_drain(cpu_pool, seg);
vm_page_cpu_pool_push(cpu_pool, page);
- mutex_unlock(&cpu_pool->lock);
+ simple_unlock(&cpu_pool->lock);
thread_unpin();
} else {
- mutex_lock(&seg->lock);
+ simple_lock(&seg->lock);
vm_page_seg_free_to_buddy(seg, page, order);
- mutex_unlock(&seg->lock);
+ simple_unlock(&seg->lock);
}
+ update_vm_page_counts();
}
void __init
@@ -610,7 +665,7 @@ vm_page_setup(void)
nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
table_size = vm_page_round(nr_pages * sizeof(struct vm_page));
- printk("vm_page: page table size: %zu entries (%zuk)\n", nr_pages,
+ printk("vm_page: page table size: %u entries (%uk)\n", nr_pages,
table_size >> 10);
table = vm_page_bootalloc(table_size);
va = (unsigned long)table;
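
A worked example of the sizing computed above, with assumed figures (none
of these values appear in the patch): managing 128 MiB of RAM with 4 KiB
pages gives nr_pages = 32768; at 96 bytes per struct vm_page,

    table_size = vm_page_round(32768 * 96) = 3 MiB,

which vm_page_bootalloc() then reserves for the page table before the
remaining pages are handed over to the buddy lists.
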
@@ -630,7 +685,7 @@ vm_page_setup(void)
- boot_seg->start);
while (page < end) {
- page->type = VM_PAGE_FREE;
+ page->type = VM_PAGE_UNUSED;
vm_page_seg_free_to_buddy(seg, page, 0);
page++;
}
@@ -640,7 +695,7 @@ vm_page_setup(void)
while (va < (unsigned long)table) {
pa = vm_page_direct_pa(va);
- page = vm_page_lookup(pa);
+ page = vm_page_lookup_pa(pa);
assert((page != NULL) && (page->type == VM_PAGE_RESERVED));
page->type = VM_PAGE_TABLE;
va += PAGE_SIZE;
@@ -655,12 +710,12 @@ vm_page_manage(struct vm_page *page)
assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
assert(page->type == VM_PAGE_RESERVED);
- vm_page_set_type(page, 0, VM_PAGE_FREE);
+ vm_page_set_type(page, 0, VM_PAGE_UNUSED);
vm_page_seg_free_to_buddy(&vm_page_segs[page->seg_index], page, 0);
}
struct vm_page *
-vm_page_lookup(phys_addr_t pa)
+vm_page_lookup_pa(phys_addr_t pa)
{
struct vm_page_seg *seg;
unsigned int i;
@@ -676,7 +731,7 @@ vm_page_lookup(phys_addr_t pa)
}
struct vm_page *
-vm_page_alloc(unsigned int order, unsigned int selector, unsigned short type)
+vm_page_alloc_p(unsigned int order, unsigned int selector, unsigned short type)
{
struct vm_page *page;
unsigned int i;
@@ -695,7 +750,7 @@ vm_page_alloc(unsigned int order, unsigned int selector, unsigned short type)
}
void
-vm_page_free(struct vm_page *page, unsigned int order)
+vm_page_free_p(struct vm_page *page, unsigned int order)
{
assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
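
The _p suffix on the renamed entry points keeps the imported allocator from
colliding with Mach's existing vm_page_alloc()/vm_page_free(), which work
on object/offset pairs. A usage sketch for a contiguous order-2 (four page)
block; VM_PAGE_SEL_DIRECTMAP and VM_PAGE_KERNEL are illustrative stand-ins
for whatever selector and type constants vm_page.h actually defines:

    struct vm_page *pages;

    /* 1 << 2 = 4 physically contiguous pages from an allowed segment. */
    pages = vm_page_alloc_p(2, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);

    if (pages != NULL) {
        /* ... use the block ... */
        vm_page_free_p(pages, 2);   /* the order must match the alloc */
    }
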
@@ -733,3 +788,14 @@ vm_page_info(void)
seg->nr_free_pages, seg->nr_free_pages >> (20 - PAGE_SHIFT));
}
}
+
+void
+update_vm_page_counts(void)
+{
+ unsigned long pages;
+ unsigned int i;
+
+ for (i = 0, pages = 0; i < vm_page_segs_size; i++)
+ pages += vm_page_segs[i].nr_free_pages;
+ vm_page_free_count = pages;
+}
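
update_vm_page_counts() folds the per-segment counters into
vm_page_free_count, the global that the rest of Mach (notably the pageout
daemon) already watches. Note that it reads nr_free_pages without taking
the segment locks, so the result is a best-effort snapshot rather than an
exact figure. A sketch of the kind of consumer this keeps working, using
existing Mach globals (the helper itself is hypothetical):

    boolean_t
    vm_page_memory_is_low(void)
    {
        /* Compare the snapshot maintained by update_vm_page_counts()
         * against Mach's low-water mark. */
        return vm_page_free_count < vm_page_free_min;
    }
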