From caef262a89136bdbef9dc750e46912a98086c557 Mon Sep 17 00:00:00 2001
From: Justus Winter <4winter@informatik.uni-hamburg.de>
Date: Fri, 22 May 2015 15:40:38 +0200
Subject: add patch series

---
 debian/patches/series                              |   3 +
 ...m-cache-policy0001-VM-cache-policy-change.patch | 346 +++++++++++++++++++++
 ...e-policy0002-vm-keep-track-of-clean-pages.patch | 329 ++++++++++++++++++++
 ...che-policy0003-vm-evict-clean-pages-first.patch |  57 ++++
 debian/patches/vm_cache_policy.patch               | 346 ---------------------
 debian/patches/vm_page_cleanq.patch                | 336 --------------------
 6 files changed, 735 insertions(+), 682 deletions(-)
 create mode 100644 debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch
 create mode 100644 debian/patches/vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
 create mode 100644 debian/patches/vm-cache-policy0003-vm-evict-clean-pages-first.patch
 delete mode 100644 debian/patches/vm_cache_policy.patch
 delete mode 100644 debian/patches/vm_page_cleanq.patch

diff --git a/debian/patches/series b/debian/patches/series
index 3393753..fa6ae31 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -9,3 +9,6 @@
 task-load.patch
 reorder-ipc_port.patch
 sysenter0001-yyy-sysenter-prototype.patch
+vm-cache-policy0001-VM-cache-policy-change.patch
+vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
+vm-cache-policy0003-vm-evict-clean-pages-first.patch
diff --git a/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch b/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch
new file mode 100644
index 0000000..dc6e867
--- /dev/null
+++ b/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch
@@ -0,0 +1,346 @@
+From 7b75bc7ec43a5897629da30fca43b9060c7b436a Mon Sep 17 00:00:00 2001
+From: Richard Braun
+Date: Wed, 9 Oct 2013 11:51:54 +0200
+Subject: [PATCH gnumach 1/3] VM cache policy change
+
+This patch lets the kernel unconditionally cache non-empty unreferenced
+objects instead of using a fixed arbitrary limit. As the pageout daemon
+evicts pages, it collects cached objects that have become empty. The
+effective result is a graceful adjustment of the number of objects
+related to memory management (virtual memory objects, their associated
+ports, and potentially objects maintained in the external memory
+managers). Physical memory can now be almost entirely filled up with
+cached pages. In addition, these cached pages are not automatically
+deactivated as objects can quickly be referenced again.
+
+There are problems with this patch however. The first is that, on
+machines with a large amount of physical memory (above 1 GiB but it also
+depends on usage patterns), scalability issues are exposed. For example,
+file systems which don't throttle their writeback requests can create
+thread storms, strongly reducing system responsiveness. Other issues
+such as linear scans of memory objects also add visible CPU overhead.
+
+The second is that, as most memory is used, it increases the chances of
+swapping deadlocks. Applications that map large objects and quickly
+cause lots of page faults can still easily bring the system to its
+knees.
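+
+In outline (an editorial sketch of the change below, not part of the
+original log message), the new policy comes down to two paths:
+vm_object_deallocate() caches every persistent object that still has
+resident pages, with no vm_object_cached_max limit, and
+vm_pageout_scan() collects a cached object as soon as its last
+resident page is reclaimed:
+
+	/* vm_object_deallocate(): cache unconditionally. */
+	if (object->can_persist && (object->resident_page_count > 0)) {
+		queue_enter(&vm_object_cached_list, object,
+			    vm_object_t, cached_list);
+		vm_object_cached_count++;
+	}
+
+	/* vm_pageout_scan(), after freeing a page: an object with
+	 * ref_count == 0 and resident_page_count == 0 is collectable. */
+	if (vm_object_collectable(object))
+		vm_object_collect(object);
+	else
+		vm_object_unlock(object);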
+--- + vm/vm_object.c | 166 ++++++++++++++++++------------------------------------- + vm/vm_object.h | 7 ++- + vm/vm_pageout.c | 7 ++- + vm/vm_resident.c | 4 +- + 4 files changed, 68 insertions(+), 116 deletions(-) + +diff --git a/vm/vm_object.c b/vm/vm_object.c +index 8c6bbab..2384b08 100644 +--- a/vm/vm_object.c ++++ b/vm/vm_object.c +@@ -59,6 +59,11 @@ + #include + #endif /* MACH_KDB */ + ++void memory_object_release( ++ ipc_port_t pager, ++ pager_request_t pager_request, ++ ipc_port_t pager_name); /* forward */ ++ + /* + * Virtual memory objects maintain the actual data + * associated with allocated virtual memory. A given +@@ -159,8 +164,9 @@ vm_object_t kernel_object = &kernel_object_store; + * + * The kernel may choose to terminate objects from this + * queue in order to reclaim storage. The current policy +- * is to permit a fixed maximum number of unreferenced +- * objects (vm_object_cached_max). ++ * is to let memory pressure dynamically adjust the number ++ * of unreferenced objects. The pageout daemon attempts to ++ * collect objects after removing pages from them. + * + * A simple lock (accessed by routines + * vm_object_cache_{lock,lock_try,unlock}) governs the +@@ -176,7 +182,6 @@ vm_object_t kernel_object = &kernel_object_store; + */ + queue_head_t vm_object_cached_list; + int vm_object_cached_count; +-int vm_object_cached_max = 4000; /* may be patched*/ + + decl_simple_lock_data(,vm_object_cached_lock_data) + +@@ -339,6 +344,33 @@ void vm_object_init(void) + IKOT_PAGING_NAME); + } + ++void vm_object_collect( ++ register vm_object_t object) ++{ ++ vm_object_unlock(object); ++ ++ /* ++ * The cache lock must be acquired in the proper order. ++ */ ++ ++ vm_object_cache_lock(); ++ vm_object_lock(object); ++ ++ /* ++ * If the object was referenced while the lock was ++ * dropped, cancel the termination. ++ */ ++ ++ if (!vm_object_collectable(object)) { ++ vm_object_unlock(object); ++ vm_object_cache_unlock(); ++ return; ++ } ++ ++ queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list); ++ vm_object_terminate(object); ++} ++ + /* + * vm_object_reference: + * +@@ -399,103 +431,35 @@ void vm_object_deallocate( + + /* + * See whether this object can persist. If so, enter +- * it in the cache, then deactivate all of its +- * pages. ++ * it in the cache. + */ +- if (object->can_persist) { +- boolean_t overflow; +- +- /* +- * Enter the object onto the queue +- * of "cached" objects. Remember whether +- * we've caused the queue to overflow, +- * as a hint. +- */ +- ++ if (object->can_persist && (object->resident_page_count > 0)) { + queue_enter(&vm_object_cached_list, object, + vm_object_t, cached_list); +- overflow = (++vm_object_cached_count > vm_object_cached_max); ++ vm_object_cached_count++; + vm_object_cached_pages_update(object->resident_page_count); + vm_object_cache_unlock(); + +- vm_object_deactivate_pages(object); + vm_object_unlock(object); ++ return; ++ } + +- /* +- * If we didn't overflow, or if the queue has +- * been reduced back to below the specified +- * minimum, then quit. +- */ +- if (!overflow) +- return; +- +- while (TRUE) { +- vm_object_cache_lock(); +- if (vm_object_cached_count <= +- vm_object_cached_max) { +- vm_object_cache_unlock(); +- return; +- } +- +- /* +- * If we must trim down the queue, take +- * the first object, and proceed to +- * terminate it instead of the original +- * object. Have to wait for pager init. +- * if it's in progress. 
+- */ +- object= (vm_object_t) +- queue_first(&vm_object_cached_list); +- vm_object_lock(object); +- +- if (!(object->pager_created && +- !object->pager_initialized)) { +- +- /* +- * Ok to terminate, hang on to lock. +- */ +- break; +- } +- +- vm_object_assert_wait(object, +- VM_OBJECT_EVENT_INITIALIZED, FALSE); +- vm_object_unlock(object); +- vm_object_cache_unlock(); +- thread_block((void (*)()) 0); +- +- /* +- * Continue loop to check if cache still +- * needs to be trimmed. +- */ +- } ++ if (object->pager_created && ++ !object->pager_initialized) { + + /* +- * Actually remove object from cache. ++ * Have to wait for initialization. ++ * Put reference back and retry ++ * when it's initialized. + */ + +- queue_remove(&vm_object_cached_list, object, +- vm_object_t, cached_list); +- vm_object_cached_count--; +- +- assert(object->ref_count == 0); +- } +- else { +- if (object->pager_created && +- !object->pager_initialized) { +- +- /* +- * Have to wait for initialization. +- * Put reference back and retry +- * when it's initialized. +- */ +- object->ref_count++; +- vm_object_assert_wait(object, +- VM_OBJECT_EVENT_INITIALIZED, FALSE); +- vm_object_unlock(object); +- vm_object_cache_unlock(); +- thread_block((void (*)()) 0); +- continue; +- } ++ object->ref_count++; ++ vm_object_assert_wait(object, ++ VM_OBJECT_EVENT_INITIALIZED, FALSE); ++ vm_object_unlock(object); ++ vm_object_cache_unlock(); ++ thread_block((void (*)()) 0); ++ continue; + } + + /* +@@ -868,28 +832,6 @@ kern_return_t memory_object_destroy( + } + + /* +- * vm_object_deactivate_pages +- * +- * Deactivate all pages in the specified object. (Keep its pages +- * in memory even though it is no longer referenced.) +- * +- * The object must be locked. +- */ +-void vm_object_deactivate_pages( +- vm_object_t object) +-{ +- vm_page_t p; +- +- queue_iterate(&object->memq, p, vm_page_t, listq) { +- vm_page_lock_queues(); +- if (!p->busy) +- vm_page_deactivate(p); +- vm_page_unlock_queues(); +- } +-} +- +- +-/* + * Routine: vm_object_pmap_protect + * + * Purpose: +@@ -2732,7 +2674,7 @@ void vm_object_page_remove( + * It balances vm_object_lookup vs iteration. 
+ */ + +- if (atop(end - start) < (unsigned)object->resident_page_count/16) { ++ if (atop(end - start) < object->resident_page_count/16) { + vm_object_page_remove_lookup++; + + for (; start < end; start += PAGE_SIZE) { +@@ -2956,7 +2898,7 @@ void vm_object_print( + + iprintf("Object 0x%X: size=0x%X", + (vm_offset_t) object, (vm_offset_t) object->size); +- printf(", %d references, %d resident pages,", object->ref_count, ++ printf(", %d references, %lu resident pages,", object->ref_count, + object->resident_page_count); + printf(" %d absent pages,", object->absent_count); + printf(" %d paging ops\n", object->paging_in_progress); +diff --git a/vm/vm_object.h b/vm/vm_object.h +index 3bfc67a..fa208aa 100644 +--- a/vm/vm_object.h ++++ b/vm/vm_object.h +@@ -72,7 +72,7 @@ struct vm_object { + */ + + int ref_count; /* Number of references */ +- int resident_page_count; ++ unsigned long resident_page_count; + /* number of resident pages */ + + struct vm_object *copy; /* Object that should receive +@@ -169,6 +169,7 @@ vm_object_t kernel_object; /* the single kernel object */ + + extern void vm_object_bootstrap(void); + extern void vm_object_init(void); ++extern void vm_object_collect(vm_object_t); + extern void vm_object_terminate(vm_object_t); + extern vm_object_t vm_object_allocate(vm_size_t); + extern void vm_object_reference(vm_object_t); +@@ -290,6 +291,10 @@ vm_object_t vm_object_copy_delayed( + * Routines implemented as macros + */ + ++#define vm_object_collectable(object) \ ++ (((object)->ref_count == 0) \ ++ && ((object)->resident_page_count == 0)) ++ + #define vm_object_paging_begin(object) \ + ((object)->paging_in_progress++) + +diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c +index aff823a..c4aba96 100644 +--- a/vm/vm_pageout.c ++++ b/vm/vm_pageout.c +@@ -748,7 +748,12 @@ void vm_pageout_scan(void) + reclaim_page: + vm_page_free(m); + vm_page_unlock_queues(); +- vm_object_unlock(object); ++ ++ if (vm_object_collectable(object)) ++ vm_object_collect(object); ++ else ++ vm_object_unlock(object); ++ + continue; + } + +diff --git a/vm/vm_resident.c b/vm/vm_resident.c +index c70fa73..b65b756 100644 +--- a/vm/vm_resident.c ++++ b/vm/vm_resident.c +@@ -523,7 +523,7 @@ void vm_page_insert( + */ + + object->resident_page_count++; +- assert(object->resident_page_count >= 0); ++ assert(object->resident_page_count != 0); + + if (object->can_persist && (object->ref_count == 0)) + vm_object_cached_pages_update(1); +@@ -630,7 +630,7 @@ void vm_page_replace( + */ + + object->resident_page_count++; +- assert(object->resident_page_count >= 0); ++ assert(object->resident_page_count != 0); + + if (object->can_persist && (object->ref_count == 0)) + vm_object_cached_pages_update(1); +-- +2.1.4 + diff --git a/debian/patches/vm-cache-policy0002-vm-keep-track-of-clean-pages.patch b/debian/patches/vm-cache-policy0002-vm-keep-track-of-clean-pages.patch new file mode 100644 index 0000000..b875aff --- /dev/null +++ b/debian/patches/vm-cache-policy0002-vm-keep-track-of-clean-pages.patch @@ -0,0 +1,329 @@ +From 0f953bd092eb28c63000cca51bbb0720141d99f9 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Sat, 21 Feb 2015 00:05:31 +0100 +Subject: [PATCH gnumach 2/3] vm: keep track of clean pages + +* vm/vm_page.h (struct vm_page): New field `cleanq'. +(vm_page_queue_clean): New declaration. +(vm_page_clean_count): Likewise. +(vm_page_queue_clean_lock): Likewise. +(vm_page_mark_dirty): New function to set and clear the dirty flag. +* vm/vm_resident.c (vm_page_queue_clean): New variable. 
+(vm_page_queue_clean_lock): Likewise. +(vm_page_clean_count): Likewise. +(vm_page_bootstrap): Initialize field `cleanq', the queue and the lock. +(vm_page_free): Get freed pages off the clean queue. +* linux/dev/glue/block.c: Use `vm_page_mark_dirty'. +* vm/memory_object.c: Likewise. +* vm/vm_debug.c: Likewise. +* vm/vm_fault.c: Likewise. +* vm/vm_map.c: Likewise. +* vm/vm_object.c: Likewise. +* vm/vm_pageout.c: Likewise. +* xen/block.c: Likewise. +--- + linux/dev/glue/block.c | 2 +- + vm/memory_object.c | 4 ++-- + vm/vm_debug.c | 2 +- + vm/vm_fault.c | 6 +++--- + vm/vm_map.c | 2 +- + vm/vm_object.c | 4 ++-- + vm/vm_page.h | 33 +++++++++++++++++++++++++++++++-- + vm/vm_pageout.c | 8 ++++---- + vm/vm_resident.c | 10 ++++++++++ + xen/block.c | 2 +- + 10 files changed, 56 insertions(+), 17 deletions(-) + +diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c +index da4ef38..3bd2c5b 100644 +--- a/linux/dev/glue/block.c ++++ b/linux/dev/glue/block.c +@@ -1537,7 +1537,7 @@ device_read (void *d, ipc_port_t reply_port, + if (dirty) + { + PAGE_WAKEUP_DONE (m); +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + vm_page_insert (m, object, o); + } + else +diff --git a/vm/memory_object.c b/vm/memory_object.c +index 097ed23..7a0dbb8 100644 +--- a/vm/memory_object.c ++++ b/vm/memory_object.c +@@ -209,7 +209,7 @@ retry_lookup: + */ + + data_m->busy = FALSE; +- data_m->dirty = FALSE; ++ vm_page_mark_dirty (data_m, FALSE); + pmap_clear_modify(data_m->phys_addr); + + data_m->page_lock = lock_value; +@@ -555,7 +555,7 @@ memory_object_lock_result_t memory_object_lock_page( + */ + + if (!m->dirty) +- m->dirty = pmap_is_modified(m->phys_addr); ++ vm_page_mark_dirty (m, pmap_is_modified(m->phys_addr)); + + if (m->dirty || (m->precious && + should_return == MEMORY_OBJECT_RETURN_ALL)) { +diff --git a/vm/vm_debug.c b/vm/vm_debug.c +index 227090e..822ca86 100644 +--- a/vm/vm_debug.c ++++ b/vm/vm_debug.c +@@ -352,7 +352,7 @@ mach_vm_object_pages( + if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) && + pmap_is_modified(p->phys_addr)) { + state |= VPI_STATE_DIRTY; +- p->dirty = TRUE; ++ vm_page_mark_dirty (p, TRUE); + } + + vm_page_lock_queues(); +diff --git a/vm/vm_fault.c b/vm/vm_fault.c +index 0fa4d6a..4bb6385 100644 +--- a/vm/vm_fault.c ++++ b/vm/vm_fault.c +@@ -978,7 +978,7 @@ vm_fault_return_t vm_fault_page( + + vm_page_lock_queues(); + pmap_page_protect(m->phys_addr, VM_PROT_NONE); +- copy_m->dirty = TRUE; ++ vm_page_mark_dirty (copy_m, TRUE); + vm_page_unlock_queues(); + + /* +@@ -1077,7 +1077,7 @@ vm_fault_return_t vm_fault_page( + */ + + if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE)) +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + + return(VM_FAULT_SUCCESS); + +@@ -1957,7 +1957,7 @@ kern_return_t vm_fault_copy( + vm_page_zero_fill(dst_page); + else + vm_page_copy(src_page, dst_page); +- dst_page->dirty = TRUE; ++ vm_page_mark_dirty (dst_page, TRUE); + + /* + * Unlock everything, and return +diff --git a/vm/vm_map.c b/vm/vm_map.c +index 6b13724..c229df5 100644 +--- a/vm/vm_map.c ++++ b/vm/vm_map.c +@@ -2931,7 +2931,7 @@ insert_pages: + assert(!m->wanted); + + m->busy = FALSE; +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + vm_page_replace(m, object, old_last_offset + offset); + if (must_wire) { + vm_page_wire(m); +diff --git a/vm/vm_object.c b/vm/vm_object.c +index 2384b08..b195303 100644 +--- a/vm/vm_object.c ++++ b/vm/vm_object.c +@@ -587,7 +587,7 @@ void vm_object_terminate( + panic("vm_object_terminate.4 0x%x 0x%x", object, p); + + if (!p->dirty) +- p->dirty = 
pmap_is_modified(p->phys_addr); ++ vm_page_mark_dirty (p, pmap_is_modified(p->phys_addr)); + + if (p->dirty || p->precious) { + p->busy = TRUE; +@@ -1089,7 +1089,7 @@ kern_return_t vm_object_copy_slowly( + */ + + new_page->busy = FALSE; +- new_page->dirty = TRUE; ++ vm_page_mark_dirty (new_page, TRUE); + vm_object_lock(result_page->object); + PAGE_WAKEUP_DONE(result_page); + +diff --git a/vm/vm_page.h b/vm/vm_page.h +index e6a8c49..41c5711 100644 +--- a/vm/vm_page.h ++++ b/vm/vm_page.h +@@ -70,8 +70,10 @@ + * and sundry status bits. + * + * Fields in this structure are locked either by the lock on the +- * object that the page belongs to (O) or by the lock on the page +- * queues (P). [Some fields require that both locks be held to ++ * object that the page belongs to (O), by the lock on the page ++ * queues (P), or by vm_page_queue_clean_lock (C). ++ * ++ * [Some fields require that both locks, O and P, be held to + * change that field; holding either lock is sufficient to read.] + */ + +@@ -79,6 +81,7 @@ struct vm_page { + queue_chain_t pageq; /* queue info for FIFO + * queue or free list (P) */ + queue_chain_t listq; /* all pages in same object (O) */ ++ queue_chain_t cleanq; /* all clean pages (C) */ + struct vm_page *next; /* VP bucket link (O) */ + + vm_object_t object; /* which object am I in (O,P) */ +@@ -147,8 +150,12 @@ extern + queue_head_t vm_page_queue_active; /* active memory queue */ + extern + queue_head_t vm_page_queue_inactive; /* inactive memory queue */ ++extern ++queue_head_t vm_page_queue_clean; /* clean memory queue */ + + extern ++int vm_page_clean_count; /* How many pages are clean? */ ++extern + int vm_page_free_count; /* How many pages are free? */ + extern + int vm_page_fictitious_count;/* How many fictitious pages are free? */ +@@ -184,6 +191,8 @@ decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive + page queues */ + decl_simple_lock_data(extern,vm_page_queue_free_lock) + /* lock on free page queue */ ++decl_simple_lock_data(extern,vm_page_queue_clean_lock) ++ /* lock on clean page queue */ + + extern unsigned int vm_page_free_wanted; + /* how many threads are waiting for memory */ +@@ -312,4 +321,24 @@ extern unsigned int vm_page_info( + } \ + MACRO_END + ++static inline void ++vm_page_mark_dirty (vm_page_t m, boolean_t dirty) ++{ ++ if (m->dirty == dirty && (dirty || m->cleanq.next)) ++ return; /* No action necessary. */ ++ ++ simple_lock (&vm_page_queue_clean_lock); ++ if (dirty && m->cleanq.next) { ++ queue_remove (&vm_page_queue_clean, m, vm_page_t, cleanq); ++ vm_page_clean_count -= 1; ++ m->cleanq.next = NULL; ++ } ++ if (! dirty) { ++ queue_enter (&vm_page_queue_clean, m, vm_page_t, cleanq); ++ vm_page_clean_count += 1; ++ } ++ simple_unlock (&vm_page_queue_clean_lock); ++ m->dirty = dirty; ++} ++ + #endif /* _VM_VM_PAGE_H_ */ +diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c +index c4aba96..eb68b20 100644 +--- a/vm/vm_pageout.c ++++ b/vm/vm_pageout.c +@@ -293,7 +293,7 @@ vm_pageout_setup( + vm_page_insert(m, new_object, new_offset); + vm_page_unlock_queues(); + +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + m->precious = FALSE; + m->page_lock = VM_PROT_NONE; + m->unlock_request = VM_PROT_NONE; +@@ -306,7 +306,7 @@ vm_pageout_setup( + vm_page_copy(m, new_m); + + vm_object_lock(old_object); +- m->dirty = FALSE; ++ vm_page_mark_dirty (m, FALSE); + pmap_clear_modify(m->phys_addr); + + /* +@@ -336,7 +336,7 @@ vm_pageout_setup( + * Use the new page below. 
+ */ + m = new_m; +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + assert(!m->precious); + PAGE_WAKEUP_DONE(m); + } +@@ -780,7 +780,7 @@ void vm_pageout_scan(void) + m->busy = TRUE; + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + if (!m->dirty) +- m->dirty = pmap_is_modified(m->phys_addr); ++ vm_page_mark_dirty (m, pmap_is_modified(m->phys_addr)); + + if (m->external) { + /* Figure out if we still care about this +diff --git a/vm/vm_resident.c b/vm/vm_resident.c +index b65b756..46980fc 100644 +--- a/vm/vm_resident.c ++++ b/vm/vm_resident.c +@@ -148,9 +148,12 @@ vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1; + queue_head_t vm_page_queue_active; + queue_head_t vm_page_queue_inactive; + decl_simple_lock_data(,vm_page_queue_lock) ++queue_head_t vm_page_queue_clean; ++decl_simple_lock_data(,vm_page_queue_clean_lock) + int vm_page_active_count; + int vm_page_inactive_count; + int vm_page_wire_count; ++int vm_page_clean_count; + + /* + * Several page replacement parameters are also +@@ -200,6 +203,7 @@ void vm_page_bootstrap( + */ + + m = &vm_page_template; ++ m->cleanq.next = NULL; + m->object = VM_OBJECT_NULL; /* reset later */ + m->offset = 0; /* reset later */ + m->wire_count = 0; +@@ -231,12 +235,14 @@ void vm_page_bootstrap( + */ + + simple_lock_init(&vm_page_queue_free_lock); ++ simple_lock_init(&vm_page_queue_clean_lock); + simple_lock_init(&vm_page_queue_lock); + + vm_page_queue_free = VM_PAGE_NULL; + vm_page_queue_fictitious = VM_PAGE_NULL; + queue_init(&vm_page_queue_active); + queue_init(&vm_page_queue_inactive); ++ queue_init(&vm_page_queue_clean); + + vm_page_free_wanted = 0; + +@@ -1304,6 +1310,10 @@ void vm_page_free( + if (mem->absent) + vm_object_absent_release(mem->object); + ++ ++ /* Get it off the clean list. */ ++ vm_page_mark_dirty (mem, TRUE); ++ + /* + * XXX The calls to vm_page_init here are + * really overkill. +diff --git a/xen/block.c b/xen/block.c +index d98b31e..175955a 100644 +--- a/xen/block.c ++++ b/xen/block.c +@@ -539,7 +539,7 @@ device_read (void *d, ipc_port_t reply_port, + assert (m->busy); + vm_page_lock_queues (); + PAGE_WAKEUP_DONE (m); +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + vm_page_insert (m, object, o); + vm_page_unlock_queues (); + o += PAGE_SIZE; +-- +2.1.4 + diff --git a/debian/patches/vm-cache-policy0003-vm-evict-clean-pages-first.patch b/debian/patches/vm-cache-policy0003-vm-evict-clean-pages-first.patch new file mode 100644 index 0000000..f2bf487 --- /dev/null +++ b/debian/patches/vm-cache-policy0003-vm-evict-clean-pages-first.patch @@ -0,0 +1,57 @@ +From c18a562f2b003a893bfb857607ac996ccc5c5be4 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Sat, 21 Feb 2015 15:20:46 +0100 +Subject: [PATCH gnumach 3/3] vm: evict clean pages first + +* vm/vm_pageout.c (vm_pageout_scan): Evict clean pages from the list +of clean pages first, without requiring an expensive scan through the +inactive list. +--- + vm/vm_pageout.c | 24 +++++++++++++++++++++++- + 1 file changed, 23 insertions(+), 1 deletion(-) + +diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c +index eb68b20..740a465 100644 +--- a/vm/vm_pageout.c ++++ b/vm/vm_pageout.c +@@ -681,6 +681,28 @@ void vm_pageout_scan(void) + /*NOTREACHED*/ + } + ++ /* Try to evict a clean page first. */ ++ simple_lock (&vm_page_queue_clean); ++ int tries; ++ for (tries = vm_page_clean_count; tries; tries--) ++ { ++ assert (! queue_empty (&vm_page_queue_clean)); ++ queue_remove_first (&vm_page_queue_clean, ++ m, vm_page_t, cleanq); ++ if (! 
m->active && m->inactive ++ && (want_pages || m->external)) ++ { ++ m->cleanq.next = NULL; ++ vm_page_clean_count -= 1; ++ simple_unlock (&vm_page_queue_clean); ++ goto got_one; ++ } ++ else ++ queue_enter (&vm_page_queue_clean, ++ m, vm_page_t, cleanq); ++ } ++ simple_unlock (&vm_page_queue_clean); ++ + vm_pageout_inactive++; + + /* Find a page we are interested in paging out. If we +@@ -697,7 +719,7 @@ void vm_pageout_scan(void) + if (!m) + goto pause; + } +- ++ got_one: + object = m->object; + + /* +-- +2.1.4 + diff --git a/debian/patches/vm_cache_policy.patch b/debian/patches/vm_cache_policy.patch deleted file mode 100644 index f98b499..0000000 --- a/debian/patches/vm_cache_policy.patch +++ /dev/null @@ -1,346 +0,0 @@ -From 316e41125e61cc7c49d601c7f5f3f9757bb867a5 Mon Sep 17 00:00:00 2001 -From: Richard Braun -Date: Wed, 9 Oct 2013 11:51:54 +0200 -Subject: [PATCH gnumach] VM cache policy change - -This patch lets the kernel unconditionnally cache non empty unreferenced -objects instead of using a fixed arbitrary limit. As the pageout daemon -evicts pages, it collects cached objects that have become empty. The -effective result is a graceful adjustment of the number of objects -related to memory management (virtual memory objects, their associated -ports, and potentially objects maintained in the external memory -managers). Physical memory can now be almost entirely filled up with -cached pages. In addition, these cached pages are not automatically -deactivated as objects can quickly be referenced again. - -There are problems with this patch however. The first is that, on -machines with a large amount of physical memory (above 1 GiB but it also -depends on usage patterns), scalability issues are exposed. For example, -file systems which don't throttle their writeback requests can create -thread storms, strongly reducing system responsiveness. Other issues -such as linear scans of memory objects also add visible CPU overhead. - -The second is that, as most memory is used, it increases the chances of -swapping deadlocks. Applications that map large objects and quickly -cause lots of page faults can still easily bring the system to its -knees. ---- - vm/vm_object.c | 166 ++++++++++++++++++------------------------------------- - vm/vm_object.h | 7 ++- - vm/vm_pageout.c | 7 ++- - vm/vm_resident.c | 4 +- - 4 files changed, 68 insertions(+), 116 deletions(-) - -diff --git a/vm/vm_object.c b/vm/vm_object.c -index 582487e..87b98bb 100644 ---- a/vm/vm_object.c -+++ b/vm/vm_object.c -@@ -59,6 +59,11 @@ - #include - #endif /* MACH_KDB */ - -+void memory_object_release( -+ ipc_port_t pager, -+ pager_request_t pager_request, -+ ipc_port_t pager_name); /* forward */ -+ - /* - * Virtual memory objects maintain the actual data - * associated with allocated virtual memory. A given -@@ -159,8 +164,9 @@ vm_object_t kernel_object = &kernel_object_store; - * - * The kernel may choose to terminate objects from this - * queue in order to reclaim storage. The current policy -- * is to permit a fixed maximum number of unreferenced -- * objects (vm_object_cached_max). -+ * is to let memory pressure dynamically adjust the number -+ * of unreferenced objects. The pageout daemon attempts to -+ * collect objects after removing pages from them. 
- * - * A simple lock (accessed by routines - * vm_object_cache_{lock,lock_try,unlock}) governs the -@@ -176,7 +182,6 @@ vm_object_t kernel_object = &kernel_object_store; - */ - queue_head_t vm_object_cached_list; - int vm_object_cached_count; --int vm_object_cached_max = 4000; /* may be patched*/ - - decl_simple_lock_data(,vm_object_cached_lock_data) - -@@ -339,6 +344,33 @@ void vm_object_init(void) - IKOT_PAGING_NAME); - } - -+void vm_object_collect( -+ register vm_object_t object) -+{ -+ vm_object_unlock(object); -+ -+ /* -+ * The cache lock must be acquired in the proper order. -+ */ -+ -+ vm_object_cache_lock(); -+ vm_object_lock(object); -+ -+ /* -+ * If the object was referenced while the lock was -+ * dropped, cancel the termination. -+ */ -+ -+ if (!vm_object_collectable(object)) { -+ vm_object_unlock(object); -+ vm_object_cache_unlock(); -+ return; -+ } -+ -+ queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list); -+ vm_object_terminate(object); -+} -+ - /* - * vm_object_reference: - * -@@ -399,103 +431,35 @@ void vm_object_deallocate( - - /* - * See whether this object can persist. If so, enter -- * it in the cache, then deactivate all of its -- * pages. -+ * it in the cache. - */ -- if (object->can_persist) { -- boolean_t overflow; -- -- /* -- * Enter the object onto the queue -- * of "cached" objects. Remember whether -- * we've caused the queue to overflow, -- * as a hint. -- */ -- -+ if (object->can_persist && (object->resident_page_count > 0)) { - queue_enter(&vm_object_cached_list, object, - vm_object_t, cached_list); -- overflow = (++vm_object_cached_count > vm_object_cached_max); -+ vm_object_cached_count++; - vm_object_cached_pages_update(object->resident_page_count); - vm_object_cache_unlock(); - -- vm_object_deactivate_pages(object); - vm_object_unlock(object); -+ return; -+ } - -- /* -- * If we didn't overflow, or if the queue has -- * been reduced back to below the specified -- * minimum, then quit. -- */ -- if (!overflow) -- return; -- -- while (TRUE) { -- vm_object_cache_lock(); -- if (vm_object_cached_count <= -- vm_object_cached_max) { -- vm_object_cache_unlock(); -- return; -- } -- -- /* -- * If we must trim down the queue, take -- * the first object, and proceed to -- * terminate it instead of the original -- * object. Have to wait for pager init. -- * if it's in progress. -- */ -- object= (vm_object_t) -- queue_first(&vm_object_cached_list); -- vm_object_lock(object); -- -- if (!(object->pager_created && -- !object->pager_initialized)) { -- -- /* -- * Ok to terminate, hang on to lock. -- */ -- break; -- } -- -- vm_object_assert_wait(object, -- VM_OBJECT_EVENT_INITIALIZED, FALSE); -- vm_object_unlock(object); -- vm_object_cache_unlock(); -- thread_block((void (*)()) 0); -- -- /* -- * Continue loop to check if cache still -- * needs to be trimmed. -- */ -- } -+ if (object->pager_created && -+ !object->pager_initialized) { - - /* -- * Actually remove object from cache. -+ * Have to wait for initialization. -+ * Put reference back and retry -+ * when it's initialized. - */ - -- queue_remove(&vm_object_cached_list, object, -- vm_object_t, cached_list); -- vm_object_cached_count--; -- -- assert(object->ref_count == 0); -- } -- else { -- if (object->pager_created && -- !object->pager_initialized) { -- -- /* -- * Have to wait for initialization. -- * Put reference back and retry -- * when it's initialized. 
-- */ -- object->ref_count++; -- vm_object_assert_wait(object, -- VM_OBJECT_EVENT_INITIALIZED, FALSE); -- vm_object_unlock(object); -- vm_object_cache_unlock(); -- thread_block((void (*)()) 0); -- continue; -- } -+ object->ref_count++; -+ vm_object_assert_wait(object, -+ VM_OBJECT_EVENT_INITIALIZED, FALSE); -+ vm_object_unlock(object); -+ vm_object_cache_unlock(); -+ thread_block((void (*)()) 0); -+ continue; - } - - /* -@@ -868,28 +832,6 @@ kern_return_t memory_object_destroy( - } - - /* -- * vm_object_deactivate_pages -- * -- * Deactivate all pages in the specified object. (Keep its pages -- * in memory even though it is no longer referenced.) -- * -- * The object must be locked. -- */ --void vm_object_deactivate_pages( -- vm_object_t object) --{ -- vm_page_t p; -- -- queue_iterate(&object->memq, p, vm_page_t, listq) { -- vm_page_lock_queues(); -- if (!p->busy) -- vm_page_deactivate(p); -- vm_page_unlock_queues(); -- } --} -- -- --/* - * Routine: vm_object_pmap_protect - * - * Purpose: -@@ -2734,7 +2676,7 @@ void vm_object_page_remove( - * It balances vm_object_lookup vs iteration. - */ - -- if (atop(end - start) < (unsigned)object->resident_page_count/16) { -+ if (atop(end - start) < object->resident_page_count/16) { - vm_object_page_remove_lookup++; - - for (; start < end; start += PAGE_SIZE) { -@@ -2958,7 +2900,7 @@ void vm_object_print( - - iprintf("Object 0x%X: size=0x%X", - (vm_offset_t) object, (vm_offset_t) object->size); -- printf(", %d references, %d resident pages,", object->ref_count, -+ printf(", %d references, %lu resident pages,", object->ref_count, - object->resident_page_count); - printf(" %d absent pages,", object->absent_count); - printf(" %d paging ops\n", object->paging_in_progress); -diff --git a/vm/vm_object.h b/vm/vm_object.h -index 5c42f56..94677e5 100644 ---- a/vm/vm_object.h -+++ b/vm/vm_object.h -@@ -72,7 +72,7 @@ struct vm_object { - */ - - int ref_count; /* Number of references */ -- int resident_page_count; -+ unsigned long resident_page_count; - /* number of resident pages */ - - struct vm_object *copy; /* Object that should receive -@@ -169,6 +169,7 @@ vm_object_t kernel_object; /* the single kernel object */ - - extern void vm_object_bootstrap(void); - extern void vm_object_init(void); -+extern void vm_object_collect(vm_object_t); - extern void vm_object_terminate(vm_object_t); - extern vm_object_t vm_object_allocate(vm_size_t); - extern void vm_object_reference(vm_object_t); -@@ -290,6 +291,10 @@ vm_object_t vm_object_copy_delayed( - * Routines implemented as macros - */ - -+#define vm_object_collectable(object) \ -+ (((object)->ref_count == 0) \ -+ && ((object)->resident_page_count == 0)) -+ - #define vm_object_paging_begin(object) \ - ((object)->paging_in_progress++) - -diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c -index ecedb57..3bab01e 100644 ---- a/vm/vm_pageout.c -+++ b/vm/vm_pageout.c -@@ -746,7 +746,12 @@ void vm_pageout_scan(void) - reclaim_page: - vm_page_free(m); - vm_page_unlock_queues(); -- vm_object_unlock(object); -+ -+ if (vm_object_collectable(object)) -+ vm_object_collect(object); -+ else -+ vm_object_unlock(object); -+ - continue; - } - -diff --git a/vm/vm_resident.c b/vm/vm_resident.c -index fa02cbc..cb3a466 100644 ---- a/vm/vm_resident.c -+++ b/vm/vm_resident.c -@@ -523,7 +523,7 @@ void vm_page_insert( - */ - - object->resident_page_count++; -- assert(object->resident_page_count >= 0); -+ assert(object->resident_page_count != 0); - - if (object->can_persist && (object->ref_count == 0)) - vm_object_cached_pages_update(1); -@@ 
-630,7 +630,7 @@ void vm_page_replace( - */ - - object->resident_page_count++; -- assert(object->resident_page_count >= 0); -+ assert(object->resident_page_count != 0); - - if (object->can_persist && (object->ref_count == 0)) - vm_object_cached_pages_update(1); --- -2.1.1 - diff --git a/debian/patches/vm_page_cleanq.patch b/debian/patches/vm_page_cleanq.patch deleted file mode 100644 index 5535d6e..0000000 --- a/debian/patches/vm_page_cleanq.patch +++ /dev/null @@ -1,336 +0,0 @@ -diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c -index da4ef38..3bd2c5b 100644 ---- a/linux/dev/glue/block.c -+++ b/linux/dev/glue/block.c -@@ -1537,7 +1537,7 @@ device_read (void *d, ipc_port_t reply_port, - if (dirty) - { - PAGE_WAKEUP_DONE (m); -- m->dirty = TRUE; -+ vm_page_mark_dirty (m, TRUE); - vm_page_insert (m, object, o); - } - else -diff --git a/vm/memory_object.c b/vm/memory_object.c -index 097ed23..7a0dbb8 100644 ---- a/vm/memory_object.c -+++ b/vm/memory_object.c -@@ -209,7 +209,7 @@ retry_lookup: - */ - - data_m->busy = FALSE; -- data_m->dirty = FALSE; -+ vm_page_mark_dirty (data_m, FALSE); - pmap_clear_modify(data_m->phys_addr); - - data_m->page_lock = lock_value; -@@ -555,7 +555,7 @@ memory_object_lock_result_t memory_object_lock_page( - */ - - if (!m->dirty) -- m->dirty = pmap_is_modified(m->phys_addr); -+ vm_page_mark_dirty (m, pmap_is_modified(m->phys_addr)); - - if (m->dirty || (m->precious && - should_return == MEMORY_OBJECT_RETURN_ALL)) { -diff --git a/vm/vm_debug.c b/vm/vm_debug.c -index 227090e..822ca86 100644 ---- a/vm/vm_debug.c -+++ b/vm/vm_debug.c -@@ -352,7 +352,7 @@ mach_vm_object_pages( - if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) && - pmap_is_modified(p->phys_addr)) { - state |= VPI_STATE_DIRTY; -- p->dirty = TRUE; -+ vm_page_mark_dirty (p, TRUE); - } - - vm_page_lock_queues(); -diff --git a/vm/vm_fault.c b/vm/vm_fault.c -index 686156c..a48902a 100644 ---- a/vm/vm_fault.c -+++ b/vm/vm_fault.c -@@ -978,7 +978,7 @@ vm_fault_return_t vm_fault_page( - - vm_page_lock_queues(); - pmap_page_protect(m->phys_addr, VM_PROT_NONE); -- copy_m->dirty = TRUE; -+ vm_page_mark_dirty (copy_m, TRUE); - vm_page_unlock_queues(); - - /* -@@ -1077,7 +1077,7 @@ vm_fault_return_t vm_fault_page( - */ - - if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE)) -- m->dirty = TRUE; -+ vm_page_mark_dirty (m, TRUE); - - return(VM_FAULT_SUCCESS); - -@@ -1957,7 +1957,7 @@ kern_return_t vm_fault_copy( - vm_page_zero_fill(dst_page); - else - vm_page_copy(src_page, dst_page); -- dst_page->dirty = TRUE; -+ vm_page_mark_dirty (dst_page, TRUE); - - /* - * Unlock everything, and return -diff --git a/vm/vm_map.c b/vm/vm_map.c -index 6b13724..c229df5 100644 ---- a/vm/vm_map.c -+++ b/vm/vm_map.c -@@ -2931,7 +2931,7 @@ insert_pages: - assert(!m->wanted); - - m->busy = FALSE; -- m->dirty = TRUE; -+ vm_page_mark_dirty (m, TRUE); - vm_page_replace(m, object, old_last_offset + offset); - if (must_wire) { - vm_page_wire(m); -diff --git a/vm/vm_object.c b/vm/vm_object.c -index a96516c..f7d8fd2 100644 ---- a/vm/vm_object.c -+++ b/vm/vm_object.c -@@ -587,7 +587,7 @@ void vm_object_terminate( - panic("vm_object_terminate.4 0x%x 0x%x", object, p); - - if (!p->dirty) -- p->dirty = pmap_is_modified(p->phys_addr); -+ vm_page_mark_dirty (p, pmap_is_modified(p->phys_addr)); - - if (p->dirty || p->precious) { - p->busy = TRUE; -@@ -1089,7 +1089,7 @@ kern_return_t vm_object_copy_slowly( - */ - - new_page->busy = FALSE; -- new_page->dirty = TRUE; -+ vm_page_mark_dirty (new_page, TRUE); - 
vm_object_lock(result_page->object); - PAGE_WAKEUP_DONE(result_page); - -diff --git a/vm/vm_page.h b/vm/vm_page.h -index 4fe1b41..729ad8e 100644 ---- a/vm/vm_page.h -+++ b/vm/vm_page.h -@@ -70,8 +70,10 @@ - * and sundry status bits. - * - * Fields in this structure are locked either by the lock on the -- * object that the page belongs to (O) or by the lock on the page -- * queues (P). [Some fields require that both locks be held to -+ * object that the page belongs to (O), by the lock on the page -+ * queues (P), or by vm_page_queue_clean_lock (C). -+ * -+ * [Some fields require that both locks, O and P, be held to - * change that field; holding either lock is sufficient to read.] - */ - -@@ -79,6 +81,7 @@ struct vm_page { - queue_chain_t pageq; /* queue info for FIFO - * queue or free list (P) */ - queue_chain_t listq; /* all pages in same object (O) */ -+ queue_chain_t cleanq; /* all clean pages (C) */ - struct vm_page *next; /* VP bucket link (O) */ - - vm_object_t object; /* which object am I in (O,P) */ -@@ -147,8 +150,12 @@ extern - queue_head_t vm_page_queue_active; /* active memory queue */ - extern - queue_head_t vm_page_queue_inactive; /* inactive memory queue */ -+extern -+queue_head_t vm_page_queue_clean; /* clean memory queue */ - - extern -+int vm_page_clean_count; /* How many pages are clean? */ -+extern - int vm_page_free_count; /* How many pages are free? */ - extern - int vm_page_fictitious_count;/* How many fictitious pages are free? */ -@@ -184,6 +191,8 @@ decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive - page queues */ - decl_simple_lock_data(extern,vm_page_queue_free_lock) - /* lock on free page queue */ -+decl_simple_lock_data(extern,vm_page_queue_clean_lock) -+ /* lock on clean page queue */ - - extern unsigned int vm_page_free_wanted; - /* how many threads are waiting for memory */ -@@ -312,4 +321,24 @@ extern unsigned int vm_page_info( - } \ - MACRO_END - -+static inline void -+vm_page_mark_dirty (vm_page_t m, boolean_t dirty) -+{ -+ if (m->dirty == dirty && (dirty || m->cleanq.next)) -+ return; /* No action necessary. */ -+ -+ simple_lock (&vm_page_queue_clean_lock); -+ if (dirty && m->cleanq.next) { -+ queue_remove (&vm_page_queue_clean, m, vm_page_t, cleanq); -+ vm_page_clean_count -= 1; -+ m->cleanq.next = NULL; -+ } -+ if (! dirty) { -+ queue_enter (&vm_page_queue_clean, m, vm_page_t, cleanq); -+ vm_page_clean_count += 1; -+ } -+ simple_unlock (&vm_page_queue_clean_lock); -+ m->dirty = dirty; -+} -+ - #endif /* _VM_VM_PAGE_H_ */ -diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c -index c4aba96..740a465 100644 ---- a/vm/vm_pageout.c -+++ b/vm/vm_pageout.c -@@ -293,7 +293,7 @@ vm_pageout_setup( - vm_page_insert(m, new_object, new_offset); - vm_page_unlock_queues(); - -- m->dirty = TRUE; -+ vm_page_mark_dirty (m, TRUE); - m->precious = FALSE; - m->page_lock = VM_PROT_NONE; - m->unlock_request = VM_PROT_NONE; -@@ -306,7 +306,7 @@ vm_pageout_setup( - vm_page_copy(m, new_m); - - vm_object_lock(old_object); -- m->dirty = FALSE; -+ vm_page_mark_dirty (m, FALSE); - pmap_clear_modify(m->phys_addr); - - /* -@@ -336,7 +336,7 @@ vm_pageout_setup( - * Use the new page below. - */ - m = new_m; -- m->dirty = TRUE; -+ vm_page_mark_dirty (m, TRUE); - assert(!m->precious); - PAGE_WAKEUP_DONE(m); - } -@@ -681,6 +681,28 @@ void vm_pageout_scan(void) - /*NOTREACHED*/ - } - -+ /* Try to evict a clean page first. */ -+ simple_lock (&vm_page_queue_clean); -+ int tries; -+ for (tries = vm_page_clean_count; tries; tries--) -+ { -+ assert (! 
queue_empty (&vm_page_queue_clean)); -+ queue_remove_first (&vm_page_queue_clean, -+ m, vm_page_t, cleanq); -+ if (! m->active && m->inactive -+ && (want_pages || m->external)) -+ { -+ m->cleanq.next = NULL; -+ vm_page_clean_count -= 1; -+ simple_unlock (&vm_page_queue_clean); -+ goto got_one; -+ } -+ else -+ queue_enter (&vm_page_queue_clean, -+ m, vm_page_t, cleanq); -+ } -+ simple_unlock (&vm_page_queue_clean); -+ - vm_pageout_inactive++; - - /* Find a page we are interested in paging out. If we -@@ -697,7 +719,7 @@ void vm_pageout_scan(void) - if (!m) - goto pause; - } -- -+ got_one: - object = m->object; - - /* -@@ -780,7 +802,7 @@ void vm_pageout_scan(void) - m->busy = TRUE; - pmap_page_protect(m->phys_addr, VM_PROT_NONE); - if (!m->dirty) -- m->dirty = pmap_is_modified(m->phys_addr); -+ vm_page_mark_dirty (m, pmap_is_modified(m->phys_addr)); - - if (m->external) { - /* Figure out if we still care about this -diff --git a/vm/vm_resident.c b/vm/vm_resident.c -index b65b756..2e918da 100644 ---- a/vm/vm_resident.c -+++ b/vm/vm_resident.c -@@ -148,9 +148,12 @@ vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1; - queue_head_t vm_page_queue_active; - queue_head_t vm_page_queue_inactive; - decl_simple_lock_data(,vm_page_queue_lock) -+queue_head_t vm_page_queue_clean; -+decl_simple_lock_data(,vm_page_queue_clean_lock) - int vm_page_active_count; - int vm_page_inactive_count; - int vm_page_wire_count; -+int vm_page_clean_count; - - /* - * Several page replacement parameters are also -@@ -200,6 +203,7 @@ void vm_page_bootstrap( - */ - - m = &vm_page_template; -+ m->cleanq.next = NULL; - m->object = VM_OBJECT_NULL; /* reset later */ - m->offset = 0; /* reset later */ - m->wire_count = 0; -@@ -231,12 +235,14 @@ void vm_page_bootstrap( - */ - - simple_lock_init(&vm_page_queue_free_lock); -+ simple_lock_init(&vm_page_queue_clean_lock); - simple_lock_init(&vm_page_queue_lock); - - vm_page_queue_free = VM_PAGE_NULL; - vm_page_queue_fictitious = VM_PAGE_NULL; - queue_init(&vm_page_queue_active); - queue_init(&vm_page_queue_inactive); -+ queue_init(&vm_page_queue_clean); - - vm_page_free_wanted = 0; - -@@ -666,7 +672,6 @@ void vm_page_remove( - bucket->pages = mem->next; - } else { - vm_page_t *prev; -- - for (prev = &this->next; - (this = *prev) != mem; - prev = &this->next) -@@ -1304,6 +1309,10 @@ void vm_page_free( - if (mem->absent) - vm_object_absent_release(mem->object); - -+ -+ /* Get it off the clean list. */ -+ vm_page_mark_dirty (mem, TRUE); -+ - /* - * XXX The calls to vm_page_init here are - * really overkill. -diff --git a/xen/block.c b/xen/block.c -index d98b31e..175955a 100644 ---- a/xen/block.c -+++ b/xen/block.c -@@ -539,7 +539,7 @@ device_read (void *d, ipc_port_t reply_port, - assert (m->busy); - vm_page_lock_queues (); - PAGE_WAKEUP_DONE (m); -- m->dirty = TRUE; -+ vm_page_mark_dirty (m, TRUE); - vm_page_insert (m, object, o); - vm_page_unlock_queues (); - o += PAGE_SIZE; -- cgit v1.2.3
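
A note on the series as a whole, with a sketch using the names
introduced in patch 2 (editorial, not part of the commit): every direct
`m->dirty = X;' assignment is funneled through vm_page_mark_dirty() so
that vm_page_queue_clean and vm_page_clean_count never go out of sync
with the dirty bit. Each converted site follows the same calling
pattern:

	/* Re-check the pmap layer before declaring a page clean; the
	 * helper takes vm_page_queue_clean_lock internally. */
	if (!m->dirty)
		vm_page_mark_dirty (m, pmap_is_modified (m->phys_addr));

One inconsistency worth flagging: the eviction loop that patch 3 adds
to vm_pageout_scan() passes &vm_page_queue_clean, a queue_head_t, to
simple_lock() and simple_unlock(), whereas vm_page_mark_dirty() locks
&vm_page_queue_clean_lock; on builds where simple locks compile to
no-ops the mismatch goes unnoticed.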