author    | Justus Winter <4winter@informatik.uni-hamburg.de> | 2016-02-07 15:10:27 +0100
committer | Justus Winter <4winter@informatik.uni-hamburg.de> | 2016-02-07 15:10:27 +0100
commit    | 3a96e5ca098c578dc60c5d298b8f9f448f76c386 (patch)
tree      | 678f40aacbafce1a8ec511d3bd6ad8fc04975ea5 /debian/patches
parent    | 4465f40dc9660c6c875e415e9761265326b02fbc (diff)
add patch series
Diffstat (limited to 'debian/patches')
-rw-r--r-- | debian/patches/series                                              |   2
-rw-r--r-- | debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch    | 352
-rw-r--r-- | debian/patches/vm-cache-policy0002-Fix-page-cache-accounting.patch | 180
3 files changed, 534 insertions, 0 deletions
diff --git a/debian/patches/series b/debian/patches/series
index 6bcdf9a..cdb408e 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -16,3 +16,5 @@ sysenter0001-yyy-sysenter-prototype.patch
 vm-external-fixes0001-vm-remove-unused-field-from-struct-vm_external.patch
 vm-external-fixes0002-vm-allocate-a-large-map-for-all-objects-larger-than-.patch
 vm-external-fixes0003-vm-initialize-external-maps.patch
+vm-cache-policy0001-VM-cache-policy-change.patch
+vm-cache-policy0002-Fix-page-cache-accounting.patch
diff --git a/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch b/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch
new file mode 100644
index 0000000..258ca2e
--- /dev/null
+++ b/debian/patches/vm-cache-policy0001-VM-cache-policy-change.patch
@@ -0,0 +1,352 @@
+From 98d64d1a78172b1efc26cac36a367eec8496926f Mon Sep 17 00:00:00 2001
+From: Richard Braun <rbraun@sceen.net>
+Date: Wed, 9 Oct 2013 11:51:54 +0200
+Subject: [PATCH gnumach 1/2] VM cache policy change
+
+This patch lets the kernel unconditionally cache non-empty unreferenced
+objects instead of using a fixed arbitrary limit. As the pageout daemon
+evicts pages, it collects cached objects that have become empty. The
+effective result is a graceful adjustment of the number of objects
+related to memory management (virtual memory objects, their associated
+ports, and potentially objects maintained in the external memory
+managers). Physical memory can now be almost entirely filled up with
+cached pages. In addition, these cached pages are not automatically
+deactivated, as objects can quickly be referenced again.
+
+There are problems with this patch, however. The first is that, on
+machines with a large amount of physical memory (above 1 GiB, but it also
+depends on usage patterns), scalability issues are exposed. For example,
+file systems which don't throttle their writeback requests can create
+thread storms, strongly reducing system responsiveness. Other issues
+such as linear scans of memory objects also add visible CPU overhead.
+
+The second is that, as most memory is used, it increases the chances of
+swapping deadlocks. Applications that map large objects and quickly
+cause lots of page faults can still easily bring the system to its
+knees.
+---
+ vm/vm_object.c   | 165 +++++++++++++++++--------------------------------------
+ vm/vm_object.h   |   7 ++-
+ vm/vm_pageout.c  |   7 ++-
+ vm/vm_resident.c |   4 +-
+ 4 files changed, 63 insertions(+), 120 deletions(-)
+
+diff --git a/vm/vm_object.c b/vm/vm_object.c
+index 18a909f..9a019f6 100644
+--- a/vm/vm_object.c
++++ b/vm/vm_object.c
+@@ -65,8 +65,6 @@ void memory_object_release(
+ pager_request_t pager_request,
+ ipc_port_t pager_name); /* forward */
+
+-void vm_object_deactivate_pages(vm_object_t);
+-
+ /*
+ * Virtual memory objects maintain the actual data
+ * associated with allocated virtual memory. A given
+@@ -167,8 +165,9 @@ vm_object_t kernel_object = &kernel_object_store;
+ *
+ * The kernel may choose to terminate objects from this
+ * queue in order to reclaim storage. The current policy
+- * is to permit a fixed maximum number of unreferenced
+- * objects (vm_object_cached_max).
++ * is to let memory pressure dynamically adjust the number
++ * of unreferenced objects. The pageout daemon attempts to
++ * collect objects after removing pages from them.
+ *
+ * A simple lock (accessed by routines
+ * vm_object_cache_{lock,lock_try,unlock}) governs the
+@@ -184,7 +183,6 @@ vm_object_t kernel_object = &kernel_object_store;
+ */
+ queue_head_t vm_object_cached_list;
+ int vm_object_cached_count;
+-int vm_object_cached_max = 4000; /* may be patched*/
+
+ decl_simple_lock_data(,vm_object_cached_lock_data)
+
+@@ -347,6 +345,33 @@ void vm_object_init(void)
+ IKOT_PAGING_NAME);
+ }
+
++void vm_object_collect(
++ register vm_object_t object)
++{
++ vm_object_unlock(object);
++
++ /*
++ * The cache lock must be acquired in the proper order.
++ */
++
++ vm_object_cache_lock();
++ vm_object_lock(object);
++
++ /*
++ * If the object was referenced while the lock was
++ * dropped, cancel the termination.
++ */
++
++ if (!vm_object_collectable(object)) {
++ vm_object_unlock(object);
++ vm_object_cache_unlock();
++ return;
++ }
++
++ queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
++ vm_object_terminate(object);
++}
++
+ /*
+ * vm_object_reference:
+ *
+@@ -407,103 +432,35 @@ void vm_object_deallocate(
+
+ /*
+ * See whether this object can persist. If so, enter
+- * it in the cache, then deactivate all of its
+- * pages.
++ * it in the cache.
+ */
+- if (object->can_persist) {
+- boolean_t overflow;
+-
+- /*
+- * Enter the object onto the queue
+- * of "cached" objects. Remember whether
+- * we've caused the queue to overflow,
+- * as a hint.
+- */
+-
++ if (object->can_persist && (object->resident_page_count > 0)) {
+ queue_enter(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+- overflow = (++vm_object_cached_count > vm_object_cached_max);
++ vm_object_cached_count++;
+ vm_object_cached_pages_update(object->resident_page_count);
+ vm_object_cache_unlock();
+
+- vm_object_deactivate_pages(object);
+ vm_object_unlock(object);
++ return;
++ }
+
+- /*
+- * If we didn't overflow, or if the queue has
+- * been reduced back to below the specified
+- * minimum, then quit.
+- */
+- if (!overflow)
+- return;
+-
+- while (TRUE) {
+- vm_object_cache_lock();
+- if (vm_object_cached_count <=
+- vm_object_cached_max) {
+- vm_object_cache_unlock();
+- return;
+- }
+-
+- /*
+- * If we must trim down the queue, take
+- * the first object, and proceed to
+- * terminate it instead of the original
+- * object. Have to wait for pager init.
+- * if it's in progress.
+- */
+- object= (vm_object_t)
+- queue_first(&vm_object_cached_list);
+- vm_object_lock(object);
+-
+- if (!(object->pager_created &&
+- !object->pager_initialized)) {
+-
+- /*
+- * Ok to terminate, hang on to lock.
+- */
+- break;
+- }
+-
+- vm_object_assert_wait(object,
+- VM_OBJECT_EVENT_INITIALIZED, FALSE);
+- vm_object_unlock(object);
+- vm_object_cache_unlock();
+- thread_block((void (*)()) 0);
+-
+- /*
+- * Continue loop to check if cache still
+- * needs to be trimmed.
+- */
+- }
++ if (object->pager_created &&
++ !object->pager_initialized) {
+
+ /*
+- * Actually remove object from cache.
++ * Have to wait for initialization.
++ * Put reference back and retry
++ * when it's initialized.
+ */
+
+- queue_remove(&vm_object_cached_list, object,
+- vm_object_t, cached_list);
+- vm_object_cached_count--;
+-
+- assert(object->ref_count == 0);
+- }
+- else {
+- if (object->pager_created &&
+- !object->pager_initialized) {
+-
+- /*
+- * Have to wait for initialization.
+- * Put reference back and retry
+- * when it's initialized.
+- */
+- object->ref_count++;
+- vm_object_assert_wait(object,
+- VM_OBJECT_EVENT_INITIALIZED, FALSE);
+- vm_object_unlock(object);
+- vm_object_cache_unlock();
+- thread_block((void (*)()) 0);
+- continue;
+- }
++ object->ref_count++;
++ vm_object_assert_wait(object,
++ VM_OBJECT_EVENT_INITIALIZED, FALSE);
++ vm_object_unlock(object);
++ vm_object_cache_unlock();
++ thread_block((void (*)()) 0);
++ continue;
+ }
+
+ /*
+@@ -530,8 +487,6 @@ void vm_object_deallocate(
+ }
+ }
+
+-boolean_t vm_object_terminate_remove_all = FALSE;
+-
+ /*
+ * Routine: vm_object_terminate
+ * Purpose:
+@@ -884,28 +839,6 @@ kern_return_t memory_object_destroy(
+ }
+
+ /*
+- * vm_object_deactivate_pages
+- *
+- * Deactivate all pages in the specified object. (Keep its pages
+- * in memory even though it is no longer referenced.)
+- *
+- * The object must be locked.
+- */
+-void vm_object_deactivate_pages(
+- register vm_object_t object)
+-{
+- register vm_page_t p;
+-
+- queue_iterate(&object->memq, p, vm_page_t, listq) {
+- vm_page_lock_queues();
+- if (!p->busy)
+- vm_page_deactivate(p);
+- vm_page_unlock_queues();
+- }
+-}
+-
+-
+-/*
+ * Routine: vm_object_pmap_protect
+ *
+ * Purpose:
+@@ -2761,7 +2694,7 @@ void vm_object_page_remove(
+ * It balances vm_object_lookup vs iteration.
+ */
+
+- if (atop(end - start) < (unsigned)object->resident_page_count/16) {
++ if (atop(end - start) < object->resident_page_count/16) {
+ vm_object_page_remove_lookup++;
+
+ for (; start < end; start += PAGE_SIZE) {
+@@ -2989,7 +2922,7 @@ void vm_object_print(
+
+ iprintf("Object 0x%X: size=0x%X",
+ (vm_offset_t) object, (vm_offset_t) object->size);
+- printf(", %d references, %d resident pages,", object->ref_count,
++ printf(", %d references, %lu resident pages,", object->ref_count,
+ object->resident_page_count);
+ printf(" %d absent pages,", object->absent_count);
+ printf(" %d paging ops\n", object->paging_in_progress);
+diff --git a/vm/vm_object.h b/vm/vm_object.h
+index adeff65..9579879 100644
+--- a/vm/vm_object.h
++++ b/vm/vm_object.h
+@@ -72,7 +72,7 @@ struct vm_object {
+ */
+
+ int ref_count; /* Number of references */
+- int resident_page_count;
++ unsigned long resident_page_count;
+ /* number of resident pages */
+
+ struct vm_object *copy; /* Object that should receive
+@@ -169,6 +169,7 @@ vm_object_t kernel_object; /* the single kernel object */
+
+ extern void vm_object_bootstrap(void);
+ extern void vm_object_init(void);
++extern void vm_object_collect(vm_object_t);
+ extern void vm_object_terminate(vm_object_t);
+ extern vm_object_t vm_object_allocate(vm_size_t);
+ extern void vm_object_reference(vm_object_t);
+@@ -280,6 +281,10 @@ extern void vm_object_pager_wakeup(ipc_port_t pager);
+ * Routines implemented as macros
+ */
+
++#define vm_object_collectable(object) \
++ (((object)->ref_count == 0) \
++ && ((object)->resident_page_count == 0))
++
+ #define vm_object_paging_begin(object) \
+ ((object)->paging_in_progress++)
+
+diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
+index 661675f..eb75b97 100644
+--- a/vm/vm_pageout.c
++++ b/vm/vm_pageout.c
+@@ -750,7 +750,12 @@ void vm_pageout_scan()
+ reclaim_page:
+ vm_page_free(m);
+ vm_page_unlock_queues();
+- vm_object_unlock(object);
++
++ if (vm_object_collectable(object))
++ vm_object_collect(object);
++ else
++ vm_object_unlock(object);
++
+ continue;
+ }
+
+diff --git a/vm/vm_resident.c b/vm/vm_resident.c
+index 7906b58..66ab51f 100644
+--- a/vm/vm_resident.c
++++ b/vm/vm_resident.c
+@@ -523,7 +523,7 @@ void vm_page_insert(
+ */
+
+ object->resident_page_count++;
+- assert(object->resident_page_count >= 0);
++ assert(object->resident_page_count != 0);
+
+ if (object->can_persist && (object->ref_count == 0))
+ vm_object_cached_pages_update(1);
+@@ -630,7 +630,7 @@ void vm_page_replace(
+ */
+
+ object->resident_page_count++;
+- assert(object->resident_page_count >= 0);
++ assert(object->resident_page_count != 0);
+
+ if (object->can_persist && (object->ref_count == 0))
+ vm_object_cached_pages_update(1);
+--
+2.1.4
+
diff --git a/debian/patches/vm-cache-policy0002-Fix-page-cache-accounting.patch b/debian/patches/vm-cache-policy0002-Fix-page-cache-accounting.patch
new file mode 100644
index 0000000..8f17aa9
--- /dev/null
+++ b/debian/patches/vm-cache-policy0002-Fix-page-cache-accounting.patch
@@ -0,0 +1,180 @@
+From c774e89387a43d737abbdd99781a294c1cceebb2 Mon Sep 17 00:00:00 2001
+From: Richard Braun <rbraun@sceen.net>
+Date: Sun, 7 Feb 2016 14:08:24 +0100
+Subject: [PATCH gnumach 2/2] Fix page cache accounting
+
+* vm/vm_object.c (vm_object_bootstrap): Set template object `cached'
+member to FALSE.
+(vm_object_cache_add, vm_object_cache_remove): New functions.
+(vm_object_collect, vm_object_deallocate, vm_object_lookup,
+vm_object_lookup_name, vm_object_destroy): Use new cache management functions.
+(vm_object_terminate, vm_object_collapse): Make sure object isn't cached.
+* vm/vm_object.h (struct vm_object): New `cached' member.
+---
+ vm/vm_object.c | 70 +++++++++++++++++++++++++++++++++-------------------------
+ vm/vm_object.h |  3 ++-
+ 2 files changed, 42 insertions(+), 31 deletions(-)
+
+diff --git a/vm/vm_object.c b/vm/vm_object.c
+index 9a019f6..526b6f3 100644
+--- a/vm/vm_object.c
++++ b/vm/vm_object.c
+@@ -300,6 +300,7 @@ void vm_object_bootstrap(void)
+
+ vm_object_template.paging_in_progress = 0;
+ vm_object_template.can_persist = FALSE;
++ vm_object_template.cached = FALSE;
+ vm_object_template.internal = TRUE;
+ vm_object_template.temporary = TRUE;
+ vm_object_template.alive = TRUE;
+@@ -345,6 +346,33 @@ void vm_object_init(void)
+ IKOT_PAGING_NAME);
+ }
+
++/*
++ * Object cache management functions.
++ *
++ * Both the cache and the object must be locked
++ * before calling these functions.
++ */
++
++static void vm_object_cache_add(
++ vm_object_t object)
++{
++ assert(!object->cached);
++ queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
++ vm_object_cached_count++;
++ vm_object_cached_pages_update(object->resident_page_count);
++ object->cached = TRUE;
++}
++
++static void vm_object_cache_remove(
++ vm_object_t object)
++{
++ assert(object->cached);
++ queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
++ vm_object_cached_count--;
++ vm_object_cached_pages_update(-object->resident_page_count);
++ object->cached = FALSE;
++}
++
+ void vm_object_collect(
+ register vm_object_t object)
+ {
+@@ -368,7 +396,7 @@ void vm_object_collect(
+ return;
+ }
+
+- queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
++ vm_object_cache_remove(object);
+ vm_object_terminate(object);
+ }
+
+@@ -435,12 +463,8 @@ void vm_object_deallocate(
+ * it in the cache.
+ */
+ if (object->can_persist && (object->resident_page_count > 0)) {
+- queue_enter(&vm_object_cached_list, object,
+- vm_object_t, cached_list);
+- vm_object_cached_count++;
+- vm_object_cached_pages_update(object->resident_page_count);
++ vm_object_cache_add(object);
+ vm_object_cache_unlock();
+-
+ vm_object_unlock(object);
+ return;
+ }
+@@ -601,6 +625,7 @@ void vm_object_terminate(
+
+ assert(object->ref_count == 0);
+ assert(object->paging_in_progress == 0);
++ assert(!object->cached);
+
+ /*
+ * Throw away port rights... note that they may
+@@ -1803,12 +1828,8 @@ vm_object_t vm_object_lookup(
+
+ assert(object->alive);
+
+- if (object->ref_count == 0) {
+- queue_remove(&vm_object_cached_list, object,
+- vm_object_t, cached_list);
+- vm_object_cached_count--;
+- vm_object_cached_pages_update(-object->resident_page_count);
+- }
++ if (object->ref_count == 0)
++ vm_object_cache_remove(object);
+
+ object->ref_count++;
+ vm_object_unlock(object);
+@@ -1835,12 +1856,8 @@ vm_object_t vm_object_lookup_name(
+
+ assert(object->alive);
+
+- if (object->ref_count == 0) {
+- queue_remove(&vm_object_cached_list, object,
+- vm_object_t, cached_list);
+- vm_object_cached_count--;
+- vm_object_cached_pages_update(-object->resident_page_count);
+- }
++ if (object->ref_count == 0)
++ vm_object_cache_remove(object);
+
+ object->ref_count++;
+ vm_object_unlock(object);
+@@ -1872,12 +1889,8 @@ void vm_object_destroy(
+
+ object = (vm_object_t) pager->ip_kobject;
+ vm_object_lock(object);
+- if (object->ref_count == 0) {
+- queue_remove(&vm_object_cached_list, object,
+- vm_object_t, cached_list);
+- vm_object_cached_count--;
+- vm_object_cached_pages_update(-object->resident_page_count);
+- }
++ if (object->ref_count == 0)
++ vm_object_cache_remove(object);
+ object->ref_count++;
+
+ object->can_persist = FALSE;
+@@ -2026,12 +2039,8 @@ restart:
+
+ if ((object != VM_OBJECT_NULL) && !must_init) {
+ vm_object_lock(object);
+- if (object->ref_count == 0) {
+- queue_remove(&vm_object_cached_list, object,
+- vm_object_t, cached_list);
+- vm_object_cached_count--;
+- vm_object_cached_pages_update(-object->resident_page_count);
+- }
++ if (object->ref_count == 0)
++ vm_object_cache_remove(object);
+ object->ref_count++;
+ vm_object_unlock(object);
+
+@@ -2566,6 +2575,7 @@ void vm_object_collapse(
+ );
+
+ assert(backing_object->alive);
++ assert(!backing_object->cached);
+ backing_object->alive = FALSE;
+ vm_object_unlock(backing_object);
+
+diff --git a/vm/vm_object.h b/vm/vm_object.h
+index 9579879..6b9f0bc 100644
+--- a/vm/vm_object.h
++++ b/vm/vm_object.h
+@@ -148,8 +148,9 @@ struct vm_object {
+ */
+ /* boolean_t */ use_shared_copy : 1,/* Use shared (i.e.,
+ * delayed) copy on write */
+- /* boolean_t */ shadowed: 1; /* Shadow may exist */
++ /* boolean_t */ shadowed: 1, /* Shadow may exist */
+
++ /* boolean_t */ cached: 1; /* Object is cached */
+ queue_chain_t cached_list; /* Attachment point for the list
+ * of objects cached as a result
+ * of their can_persist value
+--
+2.1.4