summaryrefslogtreecommitdiff
path: root/vm
diff options
context:
space:
mode:
Diffstat (limited to 'vm')
-rw-r--r--vm/vm_fault.c8
-rw-r--r--vm/vm_map.c2
-rw-r--r--vm/vm_page.h17
-rw-r--r--vm/vm_pageout.c83
-rw-r--r--vm/vm_resident.c47
5 files changed, 123 insertions, 34 deletions
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index e45687c..a74d41b 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -443,7 +443,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
* need to allocate a real page.
*/
- real_m = vm_page_grab();
+ real_m = vm_page_grab(!object->internal);
if (real_m == VM_PAGE_NULL) {
vm_fault_cleanup(object, first_m);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -625,7 +625,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
* won't block for pages.
*/
- if (m->fictitious && !vm_page_convert(m)) {
+ if (m->fictitious && !vm_page_convert(m, FALSE)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, first_m);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -742,7 +742,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
assert(m->object == object);
first_m = VM_PAGE_NULL;
- if (m->fictitious && !vm_page_convert(m)) {
+ if (m->fictitious && !vm_page_convert(m, !object->internal)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, VM_PAGE_NULL);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -828,7 +828,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
/*
* Allocate a page for the copy
*/
- copy_m = vm_page_grab();
+ copy_m = vm_page_grab(!first_object->internal);
if (copy_m == VM_PAGE_NULL) {
RELEASE_PAGE(m);
vm_fault_cleanup(object, first_m);
diff --git a/vm/vm_map.c b/vm/vm_map.c
index c71b858..8d17a49 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -1828,7 +1828,7 @@ vm_map_copy_t copy;
* Page was not stolen, get a new
* one and do the copy now.
*/
- while ((new_m = vm_page_grab()) == VM_PAGE_NULL) {
+ while ((new_m = vm_page_grab(FALSE)) == VM_PAGE_NULL) {
VM_PAGE_WAIT((void(*)()) 0);
}
diff --git a/vm/vm_page.h b/vm/vm_page.h
index f7fa80a..6ca1c0c 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -93,6 +93,8 @@ struct vm_page {
laundry:1, /* page is being cleaned now (P)*/
free:1, /* page is on free list (P) */
reference:1, /* page has been used (P) */
+ external:1, /* page considered external (P) */
+ extcounted:1, /* page counted in ext counts (P) */
:0; /* (force to 'long' boundary) */
#ifdef ns32000
int pad; /* extra space for ns32000 bit ops */
@@ -184,6 +186,17 @@ extern
int vm_page_free_reserved; /* How many pages reserved to do pageout */
extern
int vm_page_laundry_count; /* How many pages being laundered? */
+extern
+int vm_page_external_limit; /* Max number of pages for external objects */
+
+/* Only objects marked with the extcounted bit are included in this total.
+ Pages which we scan for possible pageout, but which are not actually
+ dirty, don't get considered against the external page limits any more
+ in this way. */
+extern
+int vm_page_external_count; /* How many pages for external objects? */
+
+
decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive
page queues */
@@ -209,9 +222,9 @@ extern vm_page_t vm_page_lookup(
vm_offset_t offset);
extern vm_page_t vm_page_grab_fictitious(void);
extern void vm_page_release_fictitious(vm_page_t);
-extern boolean_t vm_page_convert(vm_page_t);
+extern boolean_t vm_page_convert(vm_page_t, boolean_t);
extern void vm_page_more_fictitious(void);
-extern vm_page_t vm_page_grab(void);
+extern vm_page_t vm_page_grab(boolean_t);
extern void vm_page_release(vm_page_t);
extern void vm_page_wait(void (*)(void));
extern vm_page_t vm_page_alloc(
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 411531b..c36e2b3 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -63,11 +63,11 @@
#endif VM_PAGEOUT_BURST_MIN
#ifndef VM_PAGEOUT_BURST_WAIT
-#define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */
+#define VM_PAGEOUT_BURST_WAIT 10 /* milliseconds per page */
#endif VM_PAGEOUT_BURST_WAIT
#ifndef VM_PAGEOUT_EMPTY_WAIT
-#define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
+#define VM_PAGEOUT_EMPTY_WAIT 75 /* milliseconds */
#endif VM_PAGEOUT_EMPTY_WAIT
#ifndef VM_PAGEOUT_PAUSE_MAX
@@ -108,16 +108,30 @@
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
#endif VM_PAGE_FREE_MIN
+/* When vm_page_external_count exceeds vm_page_external_limit,
+ * allocations of externally paged pages stops.
+ */
+
+#ifndef VM_PAGE_EXTERNAL_LIMIT
+#define VM_PAGE_EXTERNAL_LIMIT(free) ((free) / 2)
+#endif VM_PAGE_EXTERNAL_LIMIT
+
+
+/* Attempt to keep the number of externally paged pages less
+ * than vm_page_external_target.
+ */
+#ifndef VM_PAGE_EXTERNAL_TARGET
+#define VM_PAGE_EXTERNAL_TARGET(free) ((free) / 4)
+#endif VM_PAGE_EXTERNAL_TARGET
+
/*
* When vm_page_free_count falls below vm_page_free_reserved,
* only vm-privileged threads can allocate pages. vm-privilege
* allows the pageout daemon and default pager (and any other
* associated threads needed for default pageout) to continue
- * operation by dipping into the reserved pool of pages.
- */
+ * operation by dipping into the reserved pool of pages. */
#ifndef VM_PAGE_FREE_RESERVED
-#define VM_PAGE_FREE_RESERVED 15
+#define VM_PAGE_FREE_RESERVED 50
#endif VM_PAGE_FREE_RESERVED
/*
@@ -129,7 +143,7 @@
*/
#ifndef VM_PAGEOUT_RESERVED_INTERNAL
-#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 5)
+#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 25)
#endif VM_PAGEOUT_RESERVED_INTERNAL
/*
@@ -141,7 +155,7 @@
*/
#ifndef VM_PAGEOUT_RESERVED_REALLY
-#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 10)
+#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 40)
#endif VM_PAGEOUT_RESERVED_REALLY
extern void vm_pageout_continue();
@@ -150,6 +164,8 @@ extern void vm_pageout_scan_continue();
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;
+unsigned int vm_page_external_target = 0;
+
unsigned int vm_pageout_burst_max = 0;
unsigned int vm_pageout_burst_min = 0;
unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */
@@ -172,6 +188,7 @@ unsigned int vm_pageout_inactive_used = 0; /* debugging */
unsigned int vm_pageout_inactive_clean = 0; /* debugging */
unsigned int vm_pageout_inactive_dirty = 0; /* debugging */
unsigned int vm_pageout_inactive_double = 0; /* debugging */
+unsigned int vm_pageout_inactive_cleaned_external = 0;
#if NORMA_VM
/*
@@ -503,6 +520,7 @@ vm_pageout_page(m, initial, flush)
void vm_pageout_scan()
{
unsigned int burst_count;
+ unsigned int want_pages;
/*
* We want to gradually dribble pages from the active queue
@@ -616,17 +634,20 @@ void vm_pageout_scan()
}
/*
- * We are done if we have met our target *and*
+ * We are done if we have met our targets *and*
* nobody is still waiting for a page.
*/
simple_lock(&vm_page_queue_free_lock);
free_count = vm_page_free_count;
- if ((free_count >= vm_page_free_target) &
+ if ((free_count >= vm_page_free_target) &&
+ (vm_page_external_count <= vm_page_external_target) &&
(vm_page_free_wanted == 0)) {
vm_page_unlock_queues();
break;
}
+ want_pages = ((free_count < vm_page_free_target) ||
+ vm_page_free_wanted);
simple_unlock(&vm_page_queue_free_lock);
/*
@@ -680,8 +701,10 @@ void vm_pageout_scan()
}
vm_pageout_inactive++;
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
- assert(!m->active && m->inactive);
+ for (m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ want_pages || m->external;
+ m = (vm_page_t) queue_next(&m->pageq))
+ assert(!m->active && m->inactive);
object = m->object;
/*
@@ -727,7 +750,7 @@ void vm_pageout_scan()
* If it's absent, we can reclaim the page.
*/
- if (m->absent) {
+ if (want_pages && m->absent) {
vm_pageout_inactive_absent++;
reclaim_page:
vm_page_free(m);
@@ -760,6 +783,34 @@ void vm_pageout_scan()
if (!m->dirty)
m->dirty = pmap_is_modified(m->phys_addr);
+ if (m->external) {
+ /* Figure out if we still care about this
+ page in the limit of externally managed pages.
+ Clean pages don't actually cause system hosage,
+ so it's ok to stop considering them as
+ "consumers" of memory. */
+ if (m->dirty && !m->extcounted) {
+ m->extcounted = TRUE;
+ vm_page_external_count++;
+ } else if (!m->dirty && m->extcounted) {
+ m->extcounted = FALSE;
+ vm_page_external_count--;
+ }
+ }
+
+ /* If we don't actually need more memory, and the page
+ is not dirty, put it on the tail of the inactive queue
+ and move on to the next page. */
+ if (!want_pages && !m->dirty) {
+ queue_remove (&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ queue_enter (&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ vm_page_unlock_queues();
+ vm_pageout_inactive_cleaned_external++;
+ continue;
+ }
+
/*
* If it's clean and not precious, we can free the page.
*/
@@ -904,6 +955,14 @@ void vm_pageout()
free_after_reserve = vm_page_free_count - vm_page_free_reserved;
+ if (vm_page_external_limit == 0)
+ vm_page_external_limit =
+ VM_PAGE_EXTERNAL_LIMIT (free_after_reserve);
+
+ if (vm_page_external_target == 0)
+ vm_page_external_target =
+ VM_PAGE_EXTERNAL_TARGET (free_after_reserve);
+
if (vm_page_free_min == 0)
vm_page_free_min = vm_page_free_reserved +
VM_PAGE_FREE_MIN(free_after_reserve);
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index eba0157..5014da4 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -111,6 +111,7 @@ decl_simple_lock_data(,vm_page_queue_free_lock)
unsigned int vm_page_free_wanted;
int vm_page_free_count;
int vm_page_fictitious_count;
+int vm_page_external_count;
unsigned int vm_page_free_count_minimum; /* debugging */
@@ -201,6 +202,7 @@ void vm_page_bootstrap(
m->active = FALSE;
m->laundry = FALSE;
m->free = FALSE;
+ m->external = FALSE;
m->busy = TRUE;
m->wanted = FALSE;
@@ -396,7 +398,7 @@ void pmap_startup(
*/
for (i = pages_initialized; i > 0; i--) {
- vm_page_release(&pages[i - 1]);
+ vm_page_release(&pages[i - 1], FALSE);
}
/*
@@ -449,7 +451,7 @@ void vm_page_create(
panic("vm_page_create");
vm_page_init(m, paddr);
- vm_page_release(m);
+ vm_page_release(m, FALSE);
}
}
@@ -816,11 +818,12 @@ void vm_page_more_fictitious(void)
*/
boolean_t vm_page_convert(
- register vm_page_t m)
+ register vm_page_t m,
+ boolean_t external)
{
register vm_page_t real_m;
- real_m = vm_page_grab();
+ real_m = vm_page_grab(external);
if (real_m == VM_PAGE_NULL)
return FALSE;
@@ -841,7 +844,8 @@ boolean_t vm_page_convert(
* Returns VM_PAGE_NULL if the free list is too small.
*/
-vm_page_t vm_page_grab(void)
+vm_page_t vm_page_grab(
+ boolean_t external)
{
register vm_page_t mem;
@@ -849,10 +853,12 @@ vm_page_t vm_page_grab(void)
/*
* Only let privileged threads (involved in pageout)
- * dip into the reserved pool.
+ * dip into the reserved pool or exceed the limit
+ * for externally-managed pages.
*/
- if ((vm_page_free_count < vm_page_free_reserved) &&
+ if (((vm_page_free_count < vm_page_free_reserved) ||
+ (vm_page_external_count >= vm_page_external_limit)) &&
!current_thread()->vm_privilege) {
simple_unlock(&vm_page_queue_free_lock);
return VM_PAGE_NULL;
@@ -863,9 +869,12 @@ vm_page_t vm_page_grab(void)
if (--vm_page_free_count < vm_page_free_count_minimum)
vm_page_free_count_minimum = vm_page_free_count;
+ if (external)
+ vm_page_external_count++;
mem = vm_page_queue_free;
vm_page_queue_free = (vm_page_t) mem->pageq.next;
mem->free = FALSE;
+ mem->extcounted = mem->external = external;
simple_unlock(&vm_page_queue_free_lock);
/*
@@ -887,9 +896,9 @@ vm_page_t vm_page_grab(void)
return mem;
}
-vm_offset_t vm_page_grab_phys_addr(void)
+vm_offset_t vm_page_grab_phys_addr()
{
- vm_page_t p = vm_page_grab();
+ vm_page_t p = vm_page_grab(FALSE);
if (p == VM_PAGE_NULL)
return -1;
else
@@ -915,7 +924,8 @@ kern_return_t
vm_page_grab_contiguous_pages(
int npages,
vm_page_t pages[],
- natural_t *bits)
+ natural_t *bits,
+ boolean_t external)
{
register int first_set;
int size, alloc_size;
@@ -959,7 +969,8 @@ vm_page_grab_contiguous_pages(
* Do not dip into the reserved pool.
*/
- if (vm_page_free_count < vm_page_free_reserved) {
+ if ((vm_page_free_count < vm_page_free_reserved)
+ || (vm_page_external_count >= vm_page_external_limit)) {
simple_unlock(&vm_page_queue_free_lock);
return KERN_RESOURCE_SHORTAGE;
}
@@ -1063,7 +1074,8 @@ found_em:
vm_page_free_count -= npages;
if (vm_page_free_count < vm_page_free_count_minimum)
vm_page_free_count_minimum = vm_page_free_count;
-
+ if (external)
+ vm_page_external_count += npages;
{
register vm_offset_t first_phys, last_phys;
@@ -1087,6 +1099,7 @@ found_em:
prevmem->pageq.next = mem->pageq.next;
pages[(addr - first_phys) >> PAGE_SHIFT] = mem;
mem->free = FALSE;
+ mem->extcounted = mem->external = external;
/*
* Got them all ?
*/
@@ -1131,7 +1144,8 @@ out:
*/
void vm_page_release(
- register vm_page_t mem)
+ register vm_page_t mem,
+ boolean_t external)
{
simple_lock(&vm_page_queue_free_lock);
if (mem->free)
@@ -1140,6 +1154,8 @@ void vm_page_release(
mem->pageq.next = (queue_entry_t) vm_page_queue_free;
vm_page_queue_free = mem;
vm_page_free_count++;
+ if (external)
+ vm_page_external_count--;
/*
* Check if we should wake up someone waiting for page.
@@ -1225,7 +1241,7 @@ vm_page_t vm_page_alloc(
{
register vm_page_t mem;
- mem = vm_page_grab();
+ mem = vm_page_grab(!object->internal);
if (mem == VM_PAGE_NULL)
return VM_PAGE_NULL;
@@ -1280,8 +1296,9 @@ void vm_page_free(
mem->fictitious = TRUE;
vm_page_release_fictitious(mem);
} else {
+ int external = mem->external && mem->extcounted;
vm_page_init(mem, mem->phys_addr);
- vm_page_release(mem);
+ vm_page_release(mem, external);
}
}