Diffstat (limited to 'vm/vm_resident.c')
-rw-r--r--  vm/vm_resident.c  722
1 file changed, 72 insertions(+), 650 deletions(-)
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index d3b5a8e..88880ef 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -46,6 +46,7 @@
#include <machine/vm_param.h>
#include <kern/xpr.h>
#include <kern/slab.h>
+#include <kern/rdxtree.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
@@ -54,7 +55,6 @@
#if MACH_VM_DEBUG
#include <mach/kern_return.h>
-#include <mach_debug/hash_info.h>
#include <vm/vm_user.h>
#endif
@@ -79,33 +79,6 @@ vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
/*
- * The vm_page_lookup() routine, which provides for fast
- * (virtual memory object, offset) to page lookup, employs
- * the following hash table. The vm_page_{insert,remove}
- * routines install and remove associations in the table.
- * [This table is often called the virtual-to-physical,
- * or VP, table.]
- */
-typedef struct {
- decl_simple_lock_data(,lock)
- vm_page_t pages;
-} vm_page_bucket_t;
-
-vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
-unsigned int vm_page_bucket_count = 0; /* How big is array? */
-unsigned int vm_page_hash_mask; /* Mask for hash function */
-
-/*
- * Resident page structures are initialized from
- * a template (see vm_page_alloc).
- *
- * When adding a new field to the virtual memory
- * object structure, be sure to add initialization
- * (see vm_page_bootstrap).
- */
-struct vm_page vm_page_template;
-
-/*
* Resident pages that represent real memory
* are allocated from a free list.
*/
@@ -117,8 +90,6 @@ int vm_page_free_count;
int vm_page_fictitious_count;
int vm_page_external_count;
-unsigned int vm_page_free_count_minimum; /* debugging */
-
/*
* Occasionally, the virtual memory system uses
* resident page structures that do not refer to
@@ -182,9 +153,6 @@ boolean_t vm_page_deactivate_hint = TRUE;
*
* Initializes the resident memory module.
*
- * Allocates memory for the page cells, and
- * for the object/offset-to-page hash table headers.
- * Each page cell is initialized and placed on the free list.
* Returns the range of available kernel virtual memory.
*/
@@ -192,40 +160,6 @@ void vm_page_bootstrap(
vm_offset_t *startp,
vm_offset_t *endp)
{
- vm_page_t m;
- int i;
-
- /*
- * Initialize the vm_page template.
- */
-
- m = &vm_page_template;
- m->object = VM_OBJECT_NULL; /* reset later */
- m->offset = 0; /* reset later */
- m->wire_count = 0;
-
- m->inactive = FALSE;
- m->active = FALSE;
- m->laundry = FALSE;
- m->free = FALSE;
- m->external = FALSE;
-
- m->busy = TRUE;
- m->wanted = FALSE;
- m->tabled = FALSE;
- m->fictitious = FALSE;
- m->private = FALSE;
- m->absent = FALSE;
- m->error = FALSE;
- m->dirty = FALSE;
- m->precious = FALSE;
- m->reference = FALSE;
-
- m->phys_addr = 0; /* reset later */
-
- m->page_lock = VM_PROT_NONE;
- m->unlock_request = VM_PROT_NONE;
-
/*
* Initialize the page queues.
*/
@@ -241,46 +175,6 @@ void vm_page_bootstrap(
vm_page_free_wanted = 0;
/*
- * Steal memory for the kernel map entries.
- */
-
- kentry_data = pmap_steal_memory(kentry_data_size);
-
- /*
- * Allocate (and initialize) the virtual-to-physical
- * table hash buckets.
- *
- * The number of buckets should be a power of two to
- * get a good hash function. The following computation
- * chooses the first power of two that is greater
- * than the number of physical pages in the system.
- */
-
- if (vm_page_bucket_count == 0) {
- unsigned int npages = pmap_free_pages();
-
- vm_page_bucket_count = 1;
- while (vm_page_bucket_count < npages)
- vm_page_bucket_count <<= 1;
- }
-
- vm_page_hash_mask = vm_page_bucket_count - 1;
-
- if (vm_page_hash_mask & vm_page_bucket_count)
- printf("vm_page_bootstrap: WARNING -- strange page hash\n");
-
- vm_page_buckets = (vm_page_bucket_t *)
- pmap_steal_memory(vm_page_bucket_count *
- sizeof(vm_page_bucket_t));
-
- for (i = 0; i < vm_page_bucket_count; i++) {
- vm_page_bucket_t *bucket = &vm_page_buckets[i];
-
- bucket->pages = VM_PAGE_NULL;
- simple_lock_init(&bucket->lock);
- }
-
- /*
* Machine-dependent code allocates the resident page table.
* It uses vm_page_init to initialize the page frames.
* The code also returns to us the virtual space available
@@ -296,125 +190,20 @@ void vm_page_bootstrap(
*endp = virtual_space_end;
/* printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);*/
- vm_page_free_count_minimum = vm_page_free_count;
}
#ifndef MACHINE_PAGES
-/*
- * We implement pmap_steal_memory and pmap_startup with the help
- * of two simpler functions, pmap_virtual_space and pmap_next_page.
- */
-
-vm_offset_t pmap_steal_memory(
- vm_size_t size)
-{
- vm_offset_t addr, vaddr, paddr;
-
- /*
- * We round the size to an integer multiple.
- */
-
- size = (size + 3) &~ 3;
-
- /*
- * If this is the first call to pmap_steal_memory,
- * we have to initialize ourself.
- */
-
- if (virtual_space_start == virtual_space_end) {
- pmap_virtual_space(&virtual_space_start, &virtual_space_end);
-
- /*
- * The initial values must be aligned properly, and
- * we don't trust the pmap module to do it right.
- */
-
- virtual_space_start = round_page(virtual_space_start);
- virtual_space_end = trunc_page(virtual_space_end);
- }
-
- /*
- * Allocate virtual memory for this request.
- */
-
- addr = virtual_space_start;
- virtual_space_start += size;
-
- /*
- * Allocate and map physical pages to back new virtual pages.
- */
-
- for (vaddr = round_page(addr);
- vaddr < addr + size;
- vaddr += PAGE_SIZE) {
- if (!pmap_next_page(&paddr))
- panic("pmap_steal_memory");
-
- /*
- * XXX Logically, these mappings should be wired,
- * but some pmap modules barf if they are.
- */
-
- pmap_enter(kernel_pmap, vaddr, paddr,
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
- }
-
- return addr;
-}
-
void pmap_startup(
vm_offset_t *startp,
vm_offset_t *endp)
{
- unsigned int i, npages, pages_initialized;
- vm_page_t pages;
- vm_offset_t paddr;
-
- /*
- * We calculate how many page frames we will have
- * and then allocate the page structures in one chunk.
- */
-
- npages = ((PAGE_SIZE * pmap_free_pages() +
- (round_page(virtual_space_start) - virtual_space_start)) /
- (PAGE_SIZE + sizeof *pages));
-
- pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
-
- /*
- * Initialize the page frames.
- */
-
- for (i = 0, pages_initialized = 0; i < npages; i++) {
- if (!pmap_next_page(&paddr))
- break;
-
- vm_page_init(&pages[i], paddr);
- pages_initialized++;
- }
- i = 0;
- while (pmap_next_page(&paddr))
- i++;
- if (i)
- printf("%u memory page(s) left away\n", i);
-
- /*
- * Release pages in reverse order so that physical pages
- * initially get allocated in ascending addresses. This keeps
- * the devices (which must address physical memory) happy if
- * they require several consecutive pages.
- */
-
- for (i = pages_initialized; i > 0; i--) {
- vm_page_release(&pages[i - 1], FALSE);
- }
-
+ pmap_virtual_space(&virtual_space_start, &virtual_space_end);
/*
- * We have to re-align virtual_space_start,
- * because pmap_steal_memory has been using it.
+ * The initial values must be aligned properly, and
+ * we don't trust the pmap module to do it right.
*/
-
virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
*startp = virtual_space_start;
*endp = virtual_space_end;
@@ -448,6 +237,8 @@ void vm_page_create(
vm_offset_t start,
vm_offset_t end)
{
+ printf ("XXX: vm_page_create stubbed out\n");
+ return;
vm_offset_t paddr;
vm_page_t m;
@@ -463,17 +254,11 @@ void vm_page_create(
}
}
-/*
- * vm_page_hash:
- *
- * Distributes the object/offset key pair among hash buckets.
- *
- * NOTE: To get a good hash function, the bucket count should
- * be a power of two.
- */
-#define vm_page_hash(object, offset) \
- (((unsigned int)(vm_offset_t)object + (unsigned int)atop(offset)) \
- & vm_page_hash_mask)
+static rdxtree_key_t
+offset_key(vm_offset_t offset)
+{
+ return (rdxtree_key_t) atop(offset);
+}
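The new helper maps a byte offset within the object to a page index, so pages at consecutive offsets get consecutive radix-tree keys and the per-object tree stays dense. A small stand-alone sketch of that mapping, assuming the usual Mach definition of atop() as a right shift by PAGE_SHIFT (12 for 4 KiB pages); my_atop below is a stand-in, not kernel code:

	/* Sketch only: my_atop stands in for Mach's atop(). */
	#include <assert.h>
	#include <stdint.h>

	#define TOY_PAGE_SHIFT 12                        /* 4 KiB pages */
	#define my_atop(off) ((uintptr_t)(off) >> TOY_PAGE_SHIFT)

	int main(void)
	{
		assert(my_atop(0x0000) == 0);  /* first page  -> key 0 */
		assert(my_atop(0x1000) == 1);  /* second page -> key 1 */
		assert(my_atop(0x3008) == 3);  /* bytes within a page share one key */
		return 0;
	}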
/*
* vm_page_insert: [ internal use only ]
@@ -489,8 +274,6 @@ void vm_page_insert(
vm_object_t object,
vm_offset_t offset)
{
- vm_page_bucket_t *bucket;
-
VM_PAGE_CHECK(mem);
if (mem->tabled)
@@ -504,20 +287,10 @@ void vm_page_insert(
mem->offset = offset;
/*
- * Insert it into the object_object/offset hash table
- */
-
- bucket = &vm_page_buckets[vm_page_hash(object, offset)];
- simple_lock(&bucket->lock);
- mem->next = bucket->pages;
- bucket->pages = mem;
- simple_unlock(&bucket->lock);
-
- /*
- * Now link into the object's list of backed pages.
+ * Insert it into the object's radix tree.
*/
- queue_enter(&object->memq, mem, vm_page_t, listq);
+ rdxtree_insert(&object->memt, offset_key(offset), mem);
mem->tabled = TRUE;
/*
@@ -561,7 +334,8 @@ void vm_page_replace(
vm_object_t object,
vm_offset_t offset)
{
- vm_page_bucket_t *bucket;
+ struct vm_page *old;
+ void **slot;
VM_PAGE_CHECK(mem);
@@ -576,54 +350,23 @@ void vm_page_replace(
mem->offset = offset;
/*
- * Insert it into the object_object/offset hash table,
- * replacing any page that might have been there.
+ * Insert it into the object's radix tree, replacing any
+ * page that might have been there.
*/
+ slot = rdxtree_lookup_slot(&object->memt, offset_key(offset));
+ old = rdxtree_replace_slot(slot, mem);
+ if (old != VM_PAGE_NULL) {
+ old->tabled = FALSE;
+ object->resident_page_count--;
- bucket = &vm_page_buckets[vm_page_hash(object, offset)];
- simple_lock(&bucket->lock);
- if (bucket->pages) {
- vm_page_t *mp = &bucket->pages;
- vm_page_t m = *mp;
- do {
- if (m->object == object && m->offset == offset) {
- /*
- * Remove page from bucket and from object,
- * and return it to the free list.
- */
- *mp = m->next;
- queue_remove(&object->memq, m, vm_page_t,
- listq);
- m->tabled = FALSE;
- object->resident_page_count--;
-
- if (object->can_persist
- && (object->ref_count == 0))
- vm_object_cached_pages_update(-1);
-
- /*
- * Return page to the free list.
- * Note the page is not tabled now, so this
- * won't self-deadlock on the bucket lock.
- */
-
- vm_page_free(m);
- break;
- }
- mp = &m->next;
- } while ((m = *mp) != 0);
- mem->next = bucket->pages;
- } else {
- mem->next = VM_PAGE_NULL;
- }
- bucket->pages = mem;
- simple_unlock(&bucket->lock);
+ if (object->can_persist
+ && (object->ref_count == 0))
+ vm_object_cached_pages_update(-1);
- /*
- * Now link into the object's list of backed pages.
- */
+ /* And free it. */
+ vm_page_free(old);
+ }
- queue_enter(&object->memq, mem, vm_page_t, listq);
mem->tabled = TRUE;
/*
@@ -650,38 +393,11 @@ void vm_page_replace(
void vm_page_remove(
vm_page_t mem)
{
- vm_page_bucket_t *bucket;
- vm_page_t this;
-
assert(mem->tabled);
VM_PAGE_CHECK(mem);
- /*
- * Remove from the object_object/offset hash table
- */
-
- bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
- simple_lock(&bucket->lock);
- if ((this = bucket->pages) == mem) {
- /* optimize for common case */
-
- bucket->pages = mem->next;
- } else {
- vm_page_t *prev;
-
- for (prev = &this->next;
- (this = *prev) != mem;
- prev = &this->next)
- continue;
- *prev = this->next;
- }
- simple_unlock(&bucket->lock);
-
- /*
- * Now remove from the object's list of backed pages.
- */
-
- queue_remove(&mem->object->memq, mem, vm_page_t, listq);
+ /* Remove from the object's radix tree. */
+ rdxtree_remove(&mem->object->memt, offset_key(mem->offset));
/*
* And show that the object has one fewer resident
@@ -709,23 +425,7 @@ vm_page_t vm_page_lookup(
vm_object_t object,
vm_offset_t offset)
{
- vm_page_t mem;
- vm_page_bucket_t *bucket;
-
- /*
- * Search the hash table for this object/offset pair
- */
-
- bucket = &vm_page_buckets[vm_page_hash(object, offset)];
-
- simple_lock(&bucket->lock);
- for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
- VM_PAGE_CHECK(mem);
- if ((mem->object == object) && (mem->offset == offset))
- break;
- }
- simple_unlock(&bucket->lock);
- return mem;
+ return rdxtree_lookup(&object->memt, offset_key(offset));
}
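With vm_page_insert, vm_page_remove and vm_page_lookup above all going through the same per-object tree, the lifecycle of an (object, offset) association reduces to three calls. The fragment below is a sketch, not part of the patch: it uses only the rdxtree calls that appear in this diff, and assumes the caller holds the object lock and that object->memt was initialized when the object was created.

	/* Sketch only: exercises the per-object tree the way the
	 * routines above do.  Caller is assumed to hold the object lock. */
	static void
	example_memt_usage(vm_object_t object, vm_page_t mem, vm_offset_t offset)
	{
		vm_page_t found;

		/* Install the page under its page-index key. */
		rdxtree_insert(&object->memt, offset_key(offset), mem);

		/* Lookup descends the tree; no global hash chain to walk. */
		found = rdxtree_lookup(&object->memt, offset_key(offset));
		assert(found == mem);

		/* Drop the association again. */
		rdxtree_remove(&object->memt, offset_key(offset));
	}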
/*
@@ -753,21 +453,6 @@ void vm_page_rename(
}
/*
- * vm_page_init:
- *
- * Initialize the fields in a new page.
- * This takes a structure with random values and initializes it
- * so that it can be given to vm_page_release or vm_page_insert.
- */
-void vm_page_init(
- vm_page_t mem,
- vm_offset_t phys_addr)
-{
- *mem = vm_page_template;
- mem->phys_addr = phys_addr;
-}
-
-/*
* vm_page_grab_fictitious:
*
* Remove a fictitious page from the free list.
@@ -783,10 +468,10 @@ vm_page_t vm_page_grab_fictitious(void)
if (m != VM_PAGE_NULL) {
vm_page_fictitious_count--;
vm_page_queue_fictitious = (vm_page_t) m->pageq.next;
- m->free = FALSE;
+ assert(m->fictitious);
+ assert(! m->tabled);
}
simple_unlock(&vm_page_queue_free_lock);
-
return m;
}
@@ -799,10 +484,9 @@ vm_page_t vm_page_grab_fictitious(void)
void vm_page_release_fictitious(
vm_page_t m)
{
+ assert(m->fictitious);
+ assert(! m->tabled);
simple_lock(&vm_page_queue_free_lock);
- if (m->free)
- panic("vm_page_release_fictitious");
- m->free = TRUE;
m->pageq.next = (queue_entry_t) vm_page_queue_fictitious;
vm_page_queue_fictitious = m;
vm_page_fictitious_count++;
@@ -841,22 +525,43 @@ void vm_page_more_fictitious(void)
*/
boolean_t vm_page_convert(
- vm_page_t m,
+ struct vm_page **mp,
boolean_t external)
{
- vm_page_t real_m;
+ struct vm_page *real_m, *fict_m, *old;
+ void **slot;
+
+ fict_m = *mp;
+
+ assert(fict_m->fictitious);
+ assert(fict_m->phys_addr == vm_page_fictitious_addr);
+ assert(! fict_m->active);
+ assert(! fict_m->inactive);
real_m = vm_page_grab(external);
if (real_m == VM_PAGE_NULL)
return FALSE;
- m->phys_addr = real_m->phys_addr;
- m->fictitious = FALSE;
+ memcpy(&real_m->vm_page_header,
+ &fict_m->vm_page_header,
+ sizeof *fict_m - VM_PAGE_HEADER_SIZE);
+
+ real_m->fictitious = FALSE;
+ fict_m->tabled = FALSE;
+
+ /* Fix radix tree entry. */
+ /* XXX is the object locked? */
+ slot = rdxtree_lookup_slot(&fict_m->object->memt,
+ offset_key(fict_m->offset));
+ old = rdxtree_replace_slot(slot, real_m);
+ assert(old == fict_m);
- real_m->phys_addr = vm_page_fictitious_addr;
- real_m->fictitious = TRUE;
+ assert(real_m->phys_addr != vm_page_fictitious_addr);
+ assert(fict_m->fictitious);
+ assert(fict_m->phys_addr == vm_page_fictitious_addr);
- vm_page_release_fictitious(real_m);
+ vm_page_release_fictitious(fict_m);
+ *mp = real_m;
return TRUE;
}
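The memcpy in vm_page_convert transplants the logical page state (object, offset, flags) from the fictitious page into the freshly grabbed real page while leaving the real page's allocator-owned header alone; VM_PAGE_HEADER_SIZE is assumed here to be the size of that leading header. A self-contained sketch of the idiom with a hypothetical layout (toy_page and TOY_HEADER_SIZE are stand-ins, not the kernel's structures):

	/* Sketch only: copy everything after the allocator header. */
	#include <assert.h>
	#include <stddef.h>
	#include <string.h>

	struct toy_page {
		unsigned long phys_addr;   /* allocator header: stays with the page */
		void *object;              /* logical state starts here ... */
		unsigned long offset;
		int busy;
	};

	#define TOY_HEADER_SIZE offsetof(struct toy_page, object)

	int main(void)
	{
		struct toy_page real = { .phys_addr = 0x1000 };
		struct toy_page fict = { .phys_addr = 0, .object = (void *) 1,
		                         .offset = 0x3000, .busy = 1 };

		/* Copy the logical fields, preserving real.phys_addr. */
		memcpy((char *) &real + TOY_HEADER_SIZE,
		       (char *) &fict + TOY_HEADER_SIZE,
		       sizeof fict - TOY_HEADER_SIZE);

		assert(real.phys_addr == 0x1000);
		assert(real.offset == 0x3000 && real.busy == 1);
		return 0;
	}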
@@ -888,16 +593,15 @@ vm_page_t vm_page_grab(
return VM_PAGE_NULL;
}
- if (vm_page_queue_free == VM_PAGE_NULL)
- panic("vm_page_grab");
-
- if (--vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
if (external)
vm_page_external_count++;
- mem = vm_page_queue_free;
- vm_page_queue_free = (vm_page_t) mem->pageq.next;
- mem->free = FALSE;
+
+ mem = vm_page_alloc_p(0, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_OBJECT);
+ if (! mem) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return VM_PAGE_NULL;
+ }
+ vm_page_init_mach(mem);
mem->extcounted = mem->external = external;
simple_unlock(&vm_page_queue_free_lock);
@@ -930,237 +634,6 @@ vm_offset_t vm_page_grab_phys_addr(void)
}
/*
- * vm_page_grab_contiguous_pages:
- *
- * Take N pages off the free list, the pages should
- * cover a contiguous range of physical addresses.
- * [Used by device drivers to cope with DMA limitations]
- *
- * Returns the page descriptors in ascending order, or
- * Returns KERN_RESOURCE_SHORTAGE if it could not.
- */
-
-/* Biggest phys page number for the pages we handle in VM */
-
-vm_size_t vm_page_big_pagenum = 0; /* Set this before call! */
-
-kern_return_t
-vm_page_grab_contiguous_pages(
- int npages,
- vm_page_t pages[],
- natural_t *bits,
- boolean_t external)
-{
- int first_set;
- int size, alloc_size;
- kern_return_t ret;
- vm_page_t mem, *prevmemp;
-
-#ifndef NBBY
-#define NBBY 8 /* size in bits of sizeof()`s unity */
-#endif
-
-#define NBPEL (sizeof(natural_t)*NBBY)
-
- size = (vm_page_big_pagenum + NBPEL - 1)
- & ~(NBPEL - 1); /* in bits */
-
- size = size / NBBY; /* in bytes */
-
- /*
- * If we are called before the VM system is fully functional
- * the invoker must provide us with the work space. [one bit
- * per page starting at phys 0 and up to vm_page_big_pagenum]
- */
- if (bits == 0) {
- alloc_size = round_page(size);
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&bits,
- alloc_size)
- != KERN_SUCCESS)
- return KERN_RESOURCE_SHORTAGE;
- } else
- alloc_size = 0;
-
- memset(bits, 0, size);
-
- /*
- * A very large granularity call, its rare so that is ok
- */
- simple_lock(&vm_page_queue_free_lock);
-
- /*
- * Do not dip into the reserved pool.
- */
-
- if ((vm_page_free_count < vm_page_free_reserved)
- || (vm_page_external_count >= vm_page_external_limit)) {
- printf_once("no more room for vm_page_grab_contiguous_pages");
- simple_unlock(&vm_page_queue_free_lock);
- return KERN_RESOURCE_SHORTAGE;
- }
-
- /*
- * First pass through, build a big bit-array of
- * the pages that are free. It is not going to
- * be too large anyways, in 4k we can fit info
- * for 32k pages.
- */
- mem = vm_page_queue_free;
- while (mem) {
- int word_index, bit_index;
-
- bit_index = (mem->phys_addr >> PAGE_SHIFT);
- word_index = bit_index / NBPEL;
- bit_index = bit_index - (word_index * NBPEL);
- bits[word_index] |= 1 << bit_index;
-
- mem = (vm_page_t) mem->pageq.next;
- }
-
- /*
- * Second loop. Scan the bit array for NPAGES
- * contiguous bits. That gives us, if any,
- * the range of pages we will be grabbing off
- * the free list.
- */
- {
- int bits_so_far = 0, i;
-
- first_set = 0;
-
- for (i = 0; i < size; i += sizeof(natural_t)) {
-
- natural_t v = bits[i / sizeof(natural_t)];
- int bitpos;
-
- /*
- * Bitscan this one word
- */
- if (v) {
- /*
- * keep counting them beans ?
- */
- bitpos = 0;
-
- if (bits_so_far) {
-count_ones:
- while (v & 1) {
- bitpos++;
- /*
- * got enough beans ?
- */
- if (++bits_so_far == npages)
- goto found_em;
- v >>= 1;
- }
- /* if we are being lucky, roll again */
- if (bitpos == NBPEL)
- continue;
- }
-
- /*
- * search for beans here
- */
- bits_so_far = 0;
- while ((bitpos < NBPEL) && ((v & 1) == 0)) {
- bitpos++;
- v >>= 1;
- }
- if (v & 1) {
- first_set = (i * NBBY) + bitpos;
- goto count_ones;
- }
- }
- /*
- * No luck
- */
- bits_so_far = 0;
- }
- }
-
- /*
- * We could not find enough contiguous pages.
- */
- simple_unlock(&vm_page_queue_free_lock);
-
- printf_once("no contiguous room for vm_page_grab_contiguous_pages");
- ret = KERN_RESOURCE_SHORTAGE;
- goto out;
-
- /*
- * Final pass. Now we know which pages we want.
- * Scan the list until we find them all, grab
- * pages as we go. FIRST_SET tells us where
- * in the bit-array our pages start.
- */
-found_em:
- vm_page_free_count -= npages;
- if (vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
- if (external)
- vm_page_external_count += npages;
- {
- vm_offset_t first_phys, last_phys;
-
- /* cache values for compare */
- first_phys = first_set << PAGE_SHIFT;
- last_phys = first_phys + (npages << PAGE_SHIFT);/* not included */
-
- /* running pointers */
- mem = vm_page_queue_free;
- prevmemp = &vm_page_queue_free;
-
- while (mem) {
-
- vm_offset_t addr;
-
- addr = mem->phys_addr;
-
- if ((addr >= first_phys) &&
- (addr < last_phys)) {
- *prevmemp = (vm_page_t) mem->pageq.next;
- pages[(addr - first_phys) >> PAGE_SHIFT] = mem;
- mem->free = FALSE;
- mem->extcounted = mem->external = external;
- /*
- * Got them all ?
- */
- if (--npages == 0) break;
- } else
- prevmemp = (vm_page_t *) &mem->pageq.next;
-
- mem = (vm_page_t) mem->pageq.next;
- }
- }
-
- simple_unlock(&vm_page_queue_free_lock);
-
- /*
- * Decide if we should poke the pageout daemon.
- * We do this if the free count is less than the low
- * water mark, or if the free count is less than the high
- * water mark (but above the low water mark) and the inactive
- * count is less than its target.
- *
- * We don't have the counts locked ... if they change a little,
- * it doesn't really matter.
- */
-
- if ((vm_page_free_count < vm_page_free_min) ||
- ((vm_page_free_count < vm_page_free_target) &&
- (vm_page_inactive_count < vm_page_inactive_target)))
- thread_wakeup(&vm_page_free_wanted);
-
- ret = KERN_SUCCESS;
-out:
- if (alloc_size)
- kmem_free(kernel_map, (vm_offset_t) bits, alloc_size);
-
- return ret;
-}
-
-/*
* vm_page_release:
*
* Return a page to the free list.
@@ -1171,12 +644,7 @@ void vm_page_release(
boolean_t external)
{
simple_lock(&vm_page_queue_free_lock);
- if (mem->free)
- panic("vm_page_release");
- mem->free = TRUE;
- mem->pageq.next = (queue_entry_t) vm_page_queue_free;
- vm_page_queue_free = mem;
- vm_page_free_count++;
+ vm_page_free_p(mem, 0);
if (external)
vm_page_external_count--;
@@ -1283,9 +751,6 @@ vm_page_t vm_page_alloc(
void vm_page_free(
vm_page_t mem)
{
- if (mem->free)
- panic("vm_page_free");
-
if (mem->tabled)
vm_page_remove(mem);
VM_PAGE_QUEUES_REMOVE(mem);
@@ -1459,47 +924,6 @@ void vm_page_copy(
pmap_copy_page(src_m->phys_addr, dest_m->phys_addr);
}
-#if MACH_VM_DEBUG
-/*
- * Routine: vm_page_info
- * Purpose:
- * Return information about the global VP table.
- * Fills the buffer with as much information as possible
- * and returns the desired size of the buffer.
- * Conditions:
- * Nothing locked. The caller should provide
- * possibly-pageable memory.
- */
-
-unsigned int
-vm_page_info(
- hash_info_bucket_t *info,
- unsigned int count)
-{
- int i;
-
- if (vm_page_bucket_count < count)
- count = vm_page_bucket_count;
-
- for (i = 0; i < count; i++) {
- vm_page_bucket_t *bucket = &vm_page_buckets[i];
- unsigned int bucket_count = 0;
- vm_page_t m;
-
- simple_lock(&bucket->lock);
- for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
- bucket_count++;
- simple_unlock(&bucket->lock);
-
- /* don't touch pageable memory while holding locks */
- info[i].hib_count = bucket_count;
- }
-
- return vm_page_bucket_count;
-}
-#endif /* MACH_VM_DEBUG */
-
-
#if MACH_KDB
#define printf kdbprintf
@@ -1514,8 +938,6 @@ void vm_page_print(p)
printf("wire_count %d,", p->wire_count);
printf(" %s",
(p->active ? "active" : (p->inactive ? "inactive" : "loose")));
- printf("%s",
- (p->free ? " free" : ""));
printf("%s ",
(p->laundry ? " laundry" : ""));
printf("%s",