author    Justus Winter <4winter@informatik.uni-hamburg.de>    2015-08-17 15:09:14 +0200
committer Justus Winter <4winter@informatik.uni-hamburg.de>    2015-08-17 15:09:14 +0200
commit    93f232006cd76ad482bda6d2bc77945594ccbd3d (patch)
tree      dd65163490cf148157e0cfa004d0f6440fdd07c9
parent    5bceb53899392bd43ec19707d9f4b4f8197fbc09 (diff)
add patch series
-rw-r--r--  debian/patches/series                                                  |   3
-rw-r--r--  debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch        | 346
-rw-r--r--  debian/patches/vm-cache-policy0002-vm-keep-track-of-clean-pages.patch  | 330
-rw-r--r--  debian/patches/vm-cache-policy0003-vm-evict-clean-pages-first.patch    |  57
4 files changed, 736 insertions, 0 deletions
diff --git a/debian/patches/series b/debian/patches/series
index 552f152..e65e70f 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -22,3 +22,6 @@ fix-locking0009-fu_locking_issues.patch
fix-locking0010-fu_locking_issues.patch
fix-locking0011-fu_locking_issues.patch
fix-locking0012-fu_locking_issues.patch
+vm-cache-policy0001-VM-cache-policy-change.patch
+vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
+vm-cache-policy0003-vm-evict-clean-pages-first.patch
diff --git a/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch b/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch
new file mode 100644
index 0000000..e1b8648
--- /dev/null
+++ b/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch
@@ -0,0 +1,346 @@
+From ba7244b4c8d6200ea58135390f5a7d6fd0f24643 Mon Sep 17 00:00:00 2001
+From: Richard Braun <rbraun@sceen.net>
+Date: Wed, 9 Oct 2013 11:51:54 +0200
+Subject: [PATCH gnumach 1/3] VM cache policy change
+
+This patch lets the kernel unconditionally cache non-empty unreferenced
+objects instead of using a fixed arbitrary limit. As the pageout daemon
+evicts pages, it collects cached objects that have become empty. The
+effective result is a graceful adjustment of the number of objects
+related to memory management (virtual memory objects, their associated
+ports, and potentially objects maintained in the external memory
+managers). Physical memory can now be almost entirely filled up with
+cached pages. In addition, these cached pages are not automatically
+deactivated as objects can quickly be referenced again.
+
+There are problems with this patch, however. The first is that, on
+machines with a large amount of physical memory (above 1 GiB but it also
+depends on usage patterns), scalability issues are exposed. For example,
+file systems which don't throttle their writeback requests can create
+thread storms, strongly reducing system responsiveness. Other issues
+such as linear scans of memory objects also add visible CPU overhead.
+
+The second is that, as most memory is used, it increases the chances of
+swapping deadlocks. Applications that map large objects and quickly
+cause lots of page faults can still easily bring the system to its
+knees.
+---
+ vm/vm_object.c | 166 ++++++++++++++++++-------------------------------------
+ vm/vm_object.h | 7 ++-
+ vm/vm_pageout.c | 7 ++-
+ vm/vm_resident.c | 4 +-
+ 4 files changed, 68 insertions(+), 116 deletions(-)
+
+diff --git a/vm/vm_object.c b/vm/vm_object.c
+index 1d3e727..f97a3fd 100644
+--- a/vm/vm_object.c
++++ b/vm/vm_object.c
+@@ -59,6 +59,11 @@
+ #include <ddb/db_output.h>
+ #endif /* MACH_KDB */
+
++void memory_object_release(
++ ipc_port_t pager,
++ pager_request_t pager_request,
++ ipc_port_t pager_name); /* forward */
++
+ /*
+ * Virtual memory objects maintain the actual data
+ * associated with allocated virtual memory. A given
+@@ -159,8 +164,9 @@ vm_object_t kernel_object = &kernel_object_store;
+ *
+ * The kernel may choose to terminate objects from this
+ * queue in order to reclaim storage. The current policy
+- * is to permit a fixed maximum number of unreferenced
+- * objects (vm_object_cached_max).
++ * is to let memory pressure dynamically adjust the number
++ * of unreferenced objects. The pageout daemon attempts to
++ * collect objects after removing pages from them.
+ *
+ * A simple lock (accessed by routines
+ * vm_object_cache_{lock,lock_try,unlock}) governs the
+@@ -176,7 +182,6 @@ vm_object_t kernel_object = &kernel_object_store;
+ */
+ queue_head_t vm_object_cached_list;
+ int vm_object_cached_count;
+-int vm_object_cached_max = 4000; /* may be patched*/
+
+ decl_simple_lock_data(,vm_object_cached_lock_data)
+
+@@ -344,6 +349,33 @@ void vm_object_init(void)
+ IKOT_PAGING_NAME);
+ }
+
++void vm_object_collect(
++ register vm_object_t object)
++{
++ vm_object_unlock(object);
++
++ /*
++ * The cache lock must be acquired in the proper order.
++ */
++
++ vm_object_cache_lock();
++ vm_object_lock(object);
++
++ /*
++ * If the object was referenced while the lock was
++ * dropped, cancel the termination.
++ */
++
++ if (!vm_object_collectable(object)) {
++ vm_object_unlock(object);
++ vm_object_cache_unlock();
++ return;
++ }
++
++ queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
++ vm_object_terminate(object);
++}
++
+ /*
+ * vm_object_reference:
+ *
+@@ -404,103 +436,35 @@ void vm_object_deallocate(
+
+ /*
+ * See whether this object can persist. If so, enter
+- * it in the cache, then deactivate all of its
+- * pages.
++ * it in the cache.
+ */
+- if (object->can_persist) {
+- boolean_t overflow;
+-
+- /*
+- * Enter the object onto the queue
+- * of "cached" objects. Remember whether
+- * we've caused the queue to overflow,
+- * as a hint.
+- */
+-
++ if (object->can_persist && (object->resident_page_count > 0)) {
+ queue_enter(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+- overflow = (++vm_object_cached_count > vm_object_cached_max);
++ vm_object_cached_count++;
+ vm_object_cached_pages_update(object->resident_page_count);
+ vm_object_cache_unlock();
+
+- vm_object_deactivate_pages(object);
+ vm_object_unlock(object);
++ return;
++ }
+
+- /*
+- * If we didn't overflow, or if the queue has
+- * been reduced back to below the specified
+- * minimum, then quit.
+- */
+- if (!overflow)
+- return;
+-
+- while (TRUE) {
+- vm_object_cache_lock();
+- if (vm_object_cached_count <=
+- vm_object_cached_max) {
+- vm_object_cache_unlock();
+- return;
+- }
+-
+- /*
+- * If we must trim down the queue, take
+- * the first object, and proceed to
+- * terminate it instead of the original
+- * object. Have to wait for pager init.
+- * if it's in progress.
+- */
+- object= (vm_object_t)
+- queue_first(&vm_object_cached_list);
+- vm_object_lock(object);
+-
+- if (!(object->pager_created &&
+- !object->pager_initialized)) {
+-
+- /*
+- * Ok to terminate, hang on to lock.
+- */
+- break;
+- }
+-
+- vm_object_assert_wait(object,
+- VM_OBJECT_EVENT_INITIALIZED, FALSE);
+- vm_object_unlock(object);
+- vm_object_cache_unlock();
+- thread_block((void (*)()) 0);
+-
+- /*
+- * Continue loop to check if cache still
+- * needs to be trimmed.
+- */
+- }
++ if (object->pager_created &&
++ !object->pager_initialized) {
+
+ /*
+- * Actually remove object from cache.
++ * Have to wait for initialization.
++ * Put reference back and retry
++ * when it's initialized.
+ */
+
+- queue_remove(&vm_object_cached_list, object,
+- vm_object_t, cached_list);
+- vm_object_cached_count--;
+-
+- assert(object->ref_count == 0);
+- }
+- else {
+- if (object->pager_created &&
+- !object->pager_initialized) {
+-
+- /*
+- * Have to wait for initialization.
+- * Put reference back and retry
+- * when it's initialized.
+- */
+- object->ref_count++;
+- vm_object_assert_wait(object,
+- VM_OBJECT_EVENT_INITIALIZED, FALSE);
+- vm_object_unlock(object);
+- vm_object_cache_unlock();
+- thread_block((void (*)()) 0);
+- continue;
+- }
++ object->ref_count++;
++ vm_object_assert_wait(object,
++ VM_OBJECT_EVENT_INITIALIZED, FALSE);
++ vm_object_unlock(object);
++ vm_object_cache_unlock();
++ thread_block((void (*)()) 0);
++ continue;
+ }
+
+ /*
+@@ -875,28 +839,6 @@ kern_return_t memory_object_destroy(
+ }
+
+ /*
+- * vm_object_deactivate_pages
+- *
+- * Deactivate all pages in the specified object. (Keep its pages
+- * in memory even though it is no longer referenced.)
+- *
+- * The object must be locked.
+- */
+-void vm_object_deactivate_pages(
+- vm_object_t object)
+-{
+- vm_page_t p;
+-
+- queue_iterate(&object->memq, p, vm_page_t, listq) {
+- vm_page_lock_queues();
+- if (!p->busy)
+- vm_page_deactivate(p);
+- vm_page_unlock_queues();
+- }
+-}
+-
+-
+-/*
+ * Routine: vm_object_pmap_protect
+ *
+ * Purpose:
+@@ -2754,7 +2696,7 @@ void vm_object_page_remove(
+ * It balances vm_object_lookup vs iteration.
+ */
+
+- if (atop(end - start) < (unsigned)object->resident_page_count/16) {
++ if (atop(end - start) < object->resident_page_count/16) {
+ vm_object_page_remove_lookup++;
+
+ for (; start < end; start += PAGE_SIZE) {
+@@ -2978,7 +2920,7 @@ void vm_object_print(
+
+ iprintf("Object 0x%X: size=0x%X",
+ (vm_offset_t) object, (vm_offset_t) object->size);
+- printf(", %d references, %d resident pages,", object->ref_count,
++ printf(", %d references, %lu resident pages,", object->ref_count,
+ object->resident_page_count);
+ printf(" %d absent pages,", object->absent_count);
+ printf(" %d paging ops\n", object->paging_in_progress);
+diff --git a/vm/vm_object.h b/vm/vm_object.h
+index 3bfc67a..fa208aa 100644
+--- a/vm/vm_object.h
++++ b/vm/vm_object.h
+@@ -72,7 +72,7 @@ struct vm_object {
+ */
+
+ int ref_count; /* Number of references */
+- int resident_page_count;
++ unsigned long resident_page_count;
+ /* number of resident pages */
+
+ struct vm_object *copy; /* Object that should receive
+@@ -169,6 +169,7 @@ vm_object_t kernel_object; /* the single kernel object */
+
+ extern void vm_object_bootstrap(void);
+ extern void vm_object_init(void);
++extern void vm_object_collect(vm_object_t);
+ extern void vm_object_terminate(vm_object_t);
+ extern vm_object_t vm_object_allocate(vm_size_t);
+ extern void vm_object_reference(vm_object_t);
+@@ -290,6 +291,10 @@ vm_object_t vm_object_copy_delayed(
+ * Routines implemented as macros
+ */
+
++#define vm_object_collectable(object) \
++ (((object)->ref_count == 0) \
++ && ((object)->resident_page_count == 0))
++
+ #define vm_object_paging_begin(object) \
+ ((object)->paging_in_progress++)
+
+diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
+index b676c7b..d0d124d 100644
+--- a/vm/vm_pageout.c
++++ b/vm/vm_pageout.c
+@@ -745,7 +745,12 @@ void vm_pageout_scan(void)
+ reclaim_page:
+ vm_page_free(m);
+ vm_page_unlock_queues();
+- vm_object_unlock(object);
++
++ if (vm_object_collectable(object))
++ vm_object_collect(object);
++ else
++ vm_object_unlock(object);
++
+ continue;
+ }
+
+diff --git a/vm/vm_resident.c b/vm/vm_resident.c
+index c70fa73..b65b756 100644
+--- a/vm/vm_resident.c
++++ b/vm/vm_resident.c
+@@ -523,7 +523,7 @@ void vm_page_insert(
+ */
+
+ object->resident_page_count++;
+- assert(object->resident_page_count >= 0);
++ assert(object->resident_page_count != 0);
+
+ if (object->can_persist && (object->ref_count == 0))
+ vm_object_cached_pages_update(1);
+@@ -630,7 +630,7 @@ void vm_page_replace(
+ */
+
+ object->resident_page_count++;
+- assert(object->resident_page_count >= 0);
++ assert(object->resident_page_count != 0);
+
+ if (object->can_persist && (object->ref_count == 0))
+ vm_object_cached_pages_update(1);
+--
+2.1.4
+
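In outline, this first patch replaces the fixed vm_object_cached_max ceiling with a collect-on-empty rule: an unreferenced object stays cached for as long as it still holds resident pages, and it is terminated as soon as eviction removes its last page. The stand-alone C sketch below models that life cycle under simplified assumptions — plain counters, no locks, and illustrative names rather than gnumach's own:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified model of a VM object: a reference count plus resident pages. */
struct object {
	int ref_count;
	int resident_page_count;
	int cached;                     /* on the cached-objects list? */
};

static int cached_count;                /* models vm_object_cached_count */

/* Models vm_object_collectable(): unreferenced and empty. */
static int collectable(const struct object *o)
{
	return o->ref_count == 0 && o->resident_page_count == 0;
}

/* New policy: cache every non-empty unreferenced object; the old code
 * instead capped the cache at a fixed vm_object_cached_max. */
static void deallocate(struct object *o)
{
	if (--o->ref_count > 0)
		return;
	if (o->resident_page_count > 0) {
		o->cached = 1;
		cached_count++;
	} else {
		free(o);                /* empty and unreferenced: terminate */
	}
}

/* Called as the pageout daemon evicts a page from an object. */
static void evict_page(struct object *o)
{
	o->resident_page_count--;
	if (o->cached && collectable(o)) {
		o->cached = 0;
		cached_count--;
		free(o);                /* models vm_object_collect() */
	}
}

int main(void)
{
	struct object *o = malloc(sizeof *o);
	o->ref_count = 1;
	o->resident_page_count = 2;
	o->cached = 0;

	deallocate(o);                  /* last reference dropped: cached */
	assert(cached_count == 1);
	evict_page(o);                  /* one page left: stays cached */
	evict_page(o);                  /* now empty: collected and freed */
	assert(cached_count == 0);
	printf("cached objects remaining: %d\n", cached_count);
	return 0;
}

The point of the collect-on-empty rule is visible in evict_page(): memory pressure, not a tunable constant, decides how many cached objects survive.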
diff --git a/debian/patches/vm-cache-policy0002-vm-keep-track-of-clean-pages.patch b/debian/patches/vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
new file mode 100644
index 0000000..45401ad
--- /dev/null
+++ b/debian/patches/vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
@@ -0,0 +1,330 @@
+From 29b3ca0a0800894b563b16463dc62bf00433506d Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 21 Feb 2015 00:05:31 +0100
+Subject: [PATCH gnumach 2/3] vm: keep track of clean pages
+
+* vm/vm_page.h (struct vm_page): New field `cleanq'.
+(vm_page_queue_clean): New declaration.
+(vm_page_clean_count): Likewise.
+(vm_page_queue_clean_lock): Likewise.
+(vm_page_mark_dirty): New function to set and clear the dirty flag.
+* vm/vm_resident.c (vm_page_queue_clean): New variable.
+(vm_page_queue_clean_lock): Likewise.
+(vm_page_clean_count): Likewise.
+(vm_page_bootstrap): Initialize field `cleanq', the queue and the lock.
+(vm_page_free): Get freed pages off the clean queue.
+* linux/dev/glue/block.c: Use `vm_page_mark_dirty'.
+* vm/memory_object.c: Likewise.
+* vm/vm_debug.c: Likewise.
+* vm/vm_fault.c: Likewise.
+* vm/vm_map.c: Likewise.
+* vm/vm_object.c: Likewise.
+* vm/vm_pageout.c: Likewise.
+* xen/block.c: Likewise.
+---
+ linux/dev/glue/block.c | 2 +-
+ vm/memory_object.c | 4 ++--
+ vm/vm_debug.c | 2 +-
+ vm/vm_fault.c | 6 +++---
+ vm/vm_map.c | 2 +-
+ vm/vm_object.c | 4 ++--
+ vm/vm_page.h | 33 +++++++++++++++++++++++++++++++--
+ vm/vm_pageout.c | 9 +++++----
+ vm/vm_resident.c | 10 ++++++++++
+ xen/block.c | 2 +-
+ 10 files changed, 57 insertions(+), 17 deletions(-)
+
+diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c
+index da4ef38..3bd2c5b 100644
+--- a/linux/dev/glue/block.c
++++ b/linux/dev/glue/block.c
+@@ -1537,7 +1537,7 @@ device_read (void *d, ipc_port_t reply_port,
+ if (dirty)
+ {
+ PAGE_WAKEUP_DONE (m);
+- m->dirty = TRUE;
++ vm_page_mark_dirty (m, TRUE);
+ vm_page_insert (m, object, o);
+ }
+ else
+diff --git a/vm/memory_object.c b/vm/memory_object.c
+index 0a07429..55d8084 100644
+--- a/vm/memory_object.c
++++ b/vm/memory_object.c
+@@ -210,7 +210,7 @@ retry_lookup:
+ */
+
+ data_m->busy = FALSE;
+- data_m->dirty = FALSE;
++ vm_page_mark_dirty (data_m, FALSE);
+ pmap_clear_modify(data_m->phys_addr);
+
+ data_m->page_lock = lock_value;
+@@ -557,7 +557,7 @@ memory_object_lock_result_t memory_object_lock_page(
+ */
+
+ if (!m->dirty)
+- m->dirty = pmap_is_modified(m->phys_addr);
++ vm_page_mark_dirty (m, pmap_is_modified(m->phys_addr));
+
+ if (m->dirty || (m->precious &&
+ should_return == MEMORY_OBJECT_RETURN_ALL)) {
+diff --git a/vm/vm_debug.c b/vm/vm_debug.c
+index 227090e..822ca86 100644
+--- a/vm/vm_debug.c
++++ b/vm/vm_debug.c
+@@ -352,7 +352,7 @@ mach_vm_object_pages(
+ if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) &&
+ pmap_is_modified(p->phys_addr)) {
+ state |= VPI_STATE_DIRTY;
+- p->dirty = TRUE;
++ vm_page_mark_dirty (p, TRUE);
+ }
+
+ vm_page_lock_queues();
+diff --git a/vm/vm_fault.c b/vm/vm_fault.c
+index 101ebce..935b7b7 100644
+--- a/vm/vm_fault.c
++++ b/vm/vm_fault.c
+@@ -997,7 +997,7 @@ vm_fault_return_t vm_fault_page(
+
+ vm_page_lock_queues();
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+- copy_m->dirty = TRUE;
++ vm_page_mark_dirty (copy_m, TRUE);
+ vm_page_unlock_queues();
+
+ /*
+@@ -1096,7 +1096,7 @@ vm_fault_return_t vm_fault_page(
+ */
+
+ if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
+- m->dirty = TRUE;
++ vm_page_mark_dirty (m, TRUE);
+
+ return(VM_FAULT_SUCCESS);
+
+@@ -1976,7 +1976,7 @@ kern_return_t vm_fault_copy(
+ vm_page_zero_fill(dst_page);
+ else
+ vm_page_copy(src_page, dst_page);
+- dst_page->dirty = TRUE;
++ vm_page_mark_dirty (dst_page, TRUE);
+
+ /*
+ * Unlock everything, and return
+diff --git a/vm/vm_map.c b/vm/vm_map.c
+index 9098dfd..5e3fc2d 100644
+--- a/vm/vm_map.c
++++ b/vm/vm_map.c
+@@ -2960,7 +2960,7 @@ insert_pages:
+ assert(!m->wanted);
+
+ m->busy = FALSE;
+- m->dirty = TRUE;
++ vm_page_mark_dirty (m, TRUE);
+ vm_page_replace(m, object, old_last_offset + offset);
+ if (must_wire) {
+ vm_page_wire(m);
+diff --git a/vm/vm_object.c b/vm/vm_object.c
+index f97a3fd..4d882c1 100644
+--- a/vm/vm_object.c
++++ b/vm/vm_object.c
+@@ -588,7 +588,7 @@ void vm_object_terminate(
+ }
+
+ if (!p->dirty)
+- p->dirty = pmap_is_modified(p->phys_addr);
++ vm_page_mark_dirty (p, pmap_is_modified(p->phys_addr));
+
+ if (p->dirty || p->precious) {
+ p->busy = TRUE;
+@@ -1096,7 +1096,7 @@ kern_return_t vm_object_copy_slowly(
+ */
+
+ new_page->busy = FALSE;
+- new_page->dirty = TRUE;
++ vm_page_mark_dirty (new_page, TRUE);
+ vm_object_lock(result_page->object);
+ PAGE_WAKEUP_DONE(result_page);
+
+diff --git a/vm/vm_page.h b/vm/vm_page.h
+index e6a8c49..41c5711 100644
+--- a/vm/vm_page.h
++++ b/vm/vm_page.h
+@@ -70,8 +70,10 @@
+ * and sundry status bits.
+ *
+ * Fields in this structure are locked either by the lock on the
+- * object that the page belongs to (O) or by the lock on the page
+- * queues (P). [Some fields require that both locks be held to
++ * object that the page belongs to (O), by the lock on the page
++ * queues (P), or by vm_page_queue_clean_lock (C).
++ *
++ * [Some fields require that both locks, O and P, be held to
+ * change that field; holding either lock is sufficient to read.]
+ */
+
+@@ -79,6 +81,7 @@ struct vm_page {
+ queue_chain_t pageq; /* queue info for FIFO
+ * queue or free list (P) */
+ queue_chain_t listq; /* all pages in same object (O) */
++ queue_chain_t cleanq; /* all clean pages (C) */
+ struct vm_page *next; /* VP bucket link (O) */
+
+ vm_object_t object; /* which object am I in (O,P) */
+@@ -147,8 +150,12 @@ extern
+ queue_head_t vm_page_queue_active; /* active memory queue */
+ extern
+ queue_head_t vm_page_queue_inactive; /* inactive memory queue */
++extern
++queue_head_t vm_page_queue_clean; /* clean memory queue */
+
+ extern
++int vm_page_clean_count; /* How many pages are clean? */
++extern
+ int vm_page_free_count; /* How many pages are free? */
+ extern
+ int vm_page_fictitious_count;/* How many fictitious pages are free? */
+@@ -184,6 +191,8 @@ decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive
+ page queues */
+ decl_simple_lock_data(extern,vm_page_queue_free_lock)
+ /* lock on free page queue */
++decl_simple_lock_data(extern,vm_page_queue_clean_lock)
++ /* lock on clean page queue */
+
+ extern unsigned int vm_page_free_wanted;
+ /* how many threads are waiting for memory */
+@@ -312,4 +321,24 @@ extern unsigned int vm_page_info(
+ } \
+ MACRO_END
+
++static inline void
++vm_page_mark_dirty (vm_page_t m, boolean_t dirty)
++{
++ if (m->dirty == dirty && (dirty || m->cleanq.next))
++ return; /* No action necessary. */
++
++ simple_lock (&vm_page_queue_clean_lock);
++ if (dirty && m->cleanq.next) {
++ queue_remove (&vm_page_queue_clean, m, vm_page_t, cleanq);
++ vm_page_clean_count -= 1;
++ m->cleanq.next = NULL;
++ }
++ if (! dirty) {
++ queue_enter (&vm_page_queue_clean, m, vm_page_t, cleanq);
++ vm_page_clean_count += 1;
++ }
++ simple_unlock (&vm_page_queue_clean_lock);
++ m->dirty = dirty;
++}
++
+ #endif /* _VM_VM_PAGE_H_ */
+diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
+index d0d124d..a8db604 100644
+--- a/vm/vm_pageout.c
++++ b/vm/vm_pageout.c
+@@ -292,7 +292,7 @@ vm_pageout_setup(
+ vm_page_insert(m, new_object, new_offset);
+ vm_page_unlock_queues();
+
+- m->dirty = TRUE;
++ vm_page_mark_dirty (m, TRUE);
+ m->precious = FALSE;
+ m->page_lock = VM_PROT_NONE;
+ m->unlock_request = VM_PROT_NONE;
+@@ -304,7 +304,8 @@ vm_pageout_setup(
+ */
+ vm_page_copy(m, new_m);
+
+- m->dirty = FALSE;
++ vm_object_lock(old_object);
++ vm_page_mark_dirty (m, FALSE);
+ pmap_clear_modify(m->phys_addr);
+
+ /*
+@@ -332,7 +333,7 @@ vm_pageout_setup(
+ * Use the new page below.
+ */
+ m = new_m;
+- m->dirty = TRUE;
++ vm_page_mark_dirty (m, TRUE);
+ assert(!m->precious);
+ PAGE_WAKEUP_DONE(m);
+ }
+@@ -777,7 +778,7 @@ void vm_pageout_scan(void)
+ m->busy = TRUE;
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ if (!m->dirty)
+- m->dirty = pmap_is_modified(m->phys_addr);
++ vm_page_mark_dirty (m, pmap_is_modified(m->phys_addr));
+
+ if (m->external) {
+ /* Figure out if we still care about this
+diff --git a/vm/vm_resident.c b/vm/vm_resident.c
+index b65b756..46980fc 100644
+--- a/vm/vm_resident.c
++++ b/vm/vm_resident.c
+@@ -148,9 +148,12 @@ vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
+ queue_head_t vm_page_queue_active;
+ queue_head_t vm_page_queue_inactive;
+ decl_simple_lock_data(,vm_page_queue_lock)
++queue_head_t vm_page_queue_clean;
++decl_simple_lock_data(,vm_page_queue_clean_lock)
+ int vm_page_active_count;
+ int vm_page_inactive_count;
+ int vm_page_wire_count;
++int vm_page_clean_count;
+
+ /*
+ * Several page replacement parameters are also
+@@ -200,6 +203,7 @@ void vm_page_bootstrap(
+ */
+
+ m = &vm_page_template;
++ m->cleanq.next = NULL;
+ m->object = VM_OBJECT_NULL; /* reset later */
+ m->offset = 0; /* reset later */
+ m->wire_count = 0;
+@@ -231,12 +235,14 @@ void vm_page_bootstrap(
+ */
+
+ simple_lock_init(&vm_page_queue_free_lock);
++ simple_lock_init(&vm_page_queue_clean_lock);
+ simple_lock_init(&vm_page_queue_lock);
+
+ vm_page_queue_free = VM_PAGE_NULL;
+ vm_page_queue_fictitious = VM_PAGE_NULL;
+ queue_init(&vm_page_queue_active);
+ queue_init(&vm_page_queue_inactive);
++ queue_init(&vm_page_queue_clean);
+
+ vm_page_free_wanted = 0;
+
+@@ -1304,6 +1310,10 @@ void vm_page_free(
+ if (mem->absent)
+ vm_object_absent_release(mem->object);
+
++
++ /* Get it off the clean list. */
++ vm_page_mark_dirty (mem, TRUE);
++
+ /*
+ * XXX The calls to vm_page_init here are
+ * really overkill.
+diff --git a/xen/block.c b/xen/block.c
+index d98b31e..175955a 100644
+--- a/xen/block.c
++++ b/xen/block.c
+@@ -539,7 +539,7 @@ device_read (void *d, ipc_port_t reply_port,
+ assert (m->busy);
+ vm_page_lock_queues ();
+ PAGE_WAKEUP_DONE (m);
+- m->dirty = TRUE;
++ vm_page_mark_dirty (m, TRUE);
+ vm_page_insert (m, object, o);
+ vm_page_unlock_queues ();
+ o += PAGE_SIZE;
+--
+2.1.4
+
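The heart of this second patch is vm_page_mark_dirty(), which funnels every update of a page's dirty bit through one helper so the clean-page queue can never disagree with the flag. The sketch below reproduces that invariant in stand-alone C — an intrusive doubly linked ring in place of Mach's queue macros, no locks, and the same "a NULL link means off the queue" convention the patch uses:

#include <assert.h>
#include <stddef.h>

/* Simplified page: a dirty bit plus linkage on the clean queue. */
struct page {
	int dirty;
	struct page *cleanq_next;       /* NULL when not on the queue */
	struct page *cleanq_prev;
};

static struct page clean_head = { 0, &clean_head, &clean_head };
static int clean_count;                 /* models vm_page_clean_count */

static int on_clean_queue(const struct page *p)
{
	return p->cleanq_next != NULL;
}

/* Models vm_page_mark_dirty(): keep the flag and the queue in sync.
 * The kernel takes vm_page_queue_clean_lock around the queue updates. */
static void mark_dirty(struct page *p, int dirty)
{
	/* Fast path: flag unchanged and queue already consistent. */
	if (p->dirty == dirty && (dirty || on_clean_queue(p)))
		return;

	if (dirty && on_clean_queue(p)) {
		p->cleanq_prev->cleanq_next = p->cleanq_next;
		p->cleanq_next->cleanq_prev = p->cleanq_prev;
		p->cleanq_next = p->cleanq_prev = NULL;
		clean_count--;
	}
	if (!dirty) {
		p->cleanq_next = &clean_head;           /* enqueue at tail */
		p->cleanq_prev = clean_head.cleanq_prev;
		clean_head.cleanq_prev->cleanq_next = p;
		clean_head.cleanq_prev = p;
		clean_count++;
	}
	p->dirty = dirty;
}

int main(void)
{
	struct page p = { 1, NULL, NULL };      /* dirty, off the queue */

	mark_dirty(&p, 0);                      /* becomes clean: enqueued */
	assert(clean_count == 1 && on_clean_queue(&p));
	mark_dirty(&p, 0);                      /* idempotent: fast path */
	assert(clean_count == 1);
	mark_dirty(&p, 1);                      /* dirtied again: dequeued */
	assert(clean_count == 0 && !on_clean_queue(&p));
	return 0;
}

This is also why vm_page_free() in the patch calls vm_page_mark_dirty(mem, TRUE): marking a page dirty is the one operation guaranteed to unlink it from the clean queue before the page is recycled.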
diff --git a/debian/patches/vm-cache-policy0003-vm-evict-clean-pages-first.patch b/debian/patches/vm-cache-policy0003-vm-evict-clean-pages-first.patch
new file mode 100644
index 0000000..c7b03b4
--- /dev/null
+++ b/debian/patches/vm-cache-policy0003-vm-evict-clean-pages-first.patch
@@ -0,0 +1,57 @@
+From 91a700088a53173203486a751ad417502ccaf710 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 21 Feb 2015 15:20:46 +0100
+Subject: [PATCH gnumach 3/3] vm: evict clean pages first
+
+* vm/vm_pageout.c (vm_pageout_scan): Evict clean pages from the list
+of clean pages first, without requiring an expensive scan through the
+inactive list.
+---
+ vm/vm_pageout.c | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
+index a8db604..5fc8106 100644
+--- a/vm/vm_pageout.c
++++ b/vm/vm_pageout.c
+@@ -679,6 +679,28 @@ void vm_pageout_scan(void)
+ /*NOTREACHED*/
+ }
+
++ /* Try to evict a clean page first. */
++ simple_lock (&vm_page_queue_clean_lock);
++ int tries;
++ for (tries = vm_page_clean_count; tries; tries--)
++ {
++ assert (! queue_empty (&vm_page_queue_clean));
++ queue_remove_first (&vm_page_queue_clean,
++ m, vm_page_t, cleanq);
++ if (! m->active && m->inactive
++ && (want_pages || m->external))
++ {
++ m->cleanq.next = NULL;
++ vm_page_clean_count -= 1;
++ simple_unlock (&vm_page_queue_clean_lock);
++ goto got_one;
++ }
++ else
++ queue_enter (&vm_page_queue_clean,
++ m, vm_page_t, cleanq);
++ }
++ simple_unlock (&vm_page_queue_clean_lock);
++
+ vm_pageout_inactive++;
+
+ /* Find a page we are interested in paging out. If we
+@@ -695,7 +717,7 @@ void vm_pageout_scan(void)
+ if (!m)
+ goto pause;
+ }
+-
++ got_one:
+ object = m->object;
+
+ /*
+--
+2.1.4
+
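Taken together with the clean queue from the previous patch, the new fast path in vm_pageout_scan() is a bounded rotation: pop the head of the clean queue, take it if it is inactive (and, unless whole pages are wanted, backed by an external pager), otherwise re-enter it at the tail, giving up after vm_page_clean_count attempts so the loop terminates even when no page qualifies. Below is a minimal C sketch of just that selection loop, with page states reduced to flags and illustrative names throughout:

#include <stdio.h>
#include <stddef.h>

struct page {
	int active, inactive, external;
	struct page *next;              /* a singly linked ring suffices here */
};

static struct page *clean_head, *clean_tail;
static int clean_count;                 /* models vm_page_clean_count */

static struct page *pop_head(void)
{
	struct page *p = clean_head;

	clean_head = p->next;
	if (clean_head == NULL)
		clean_tail = NULL;
	clean_count--;
	return p;
}

static void push_tail(struct page *p)
{
	p->next = NULL;
	if (clean_tail != NULL)
		clean_tail->next = p;
	else
		clean_head = p;
	clean_tail = p;
	clean_count++;
}

/* Models the loop this patch adds: try each clean page at most once,
 * return the first eligible victim, rotate the rest back to the tail. */
static struct page *pick_clean_victim(int want_pages)
{
	int tries;

	for (tries = clean_count; tries > 0; tries--) {
		struct page *m = pop_head();
		if (!m->active && m->inactive && (want_pages || m->external))
			return m;       /* caller evicts it: the "goto got_one" */
		push_tail(m);           /* not eligible: keep it queued */
	}
	return NULL;                    /* fall back to the inactive-list scan */
}

int main(void)
{
	struct page pages[3] = {
		{ 1, 0, 1, NULL },      /* still active: skipped */
		{ 0, 1, 0, NULL },      /* inactive but internal: skipped */
		{ 0, 1, 1, NULL },      /* inactive and external: eligible */
	};
	struct page *victim;
	int i;

	for (i = 0; i < 3; i++)
		push_tail(&pages[i]);

	victim = pick_clean_victim(0);
	printf("victim is page %d, %d pages left on the clean queue\n",
	       (int) (victim - pages), clean_count);
	return 0;
}

Because eligible pages are by definition clean, taking the victim here lets vm_pageout_scan() reclaim it immediately, without the writeback that a dirty inactive page would require — that is the "evict clean pages first" of the subject line.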