diff options
author | Justus Winter <4winter@informatik.uni-hamburg.de> | 2015-08-18 16:48:48 +0200 |
---|---|---|
committer | Justus Winter <4winter@informatik.uni-hamburg.de> | 2015-08-18 16:48:48 +0200 |
commit | 32cee4938cef0766b22d3e799393c78c997d72a7 (patch) | |
tree | d650c33ecef8bce81fe4f049f6b42fc975c481ef /debian | |
parent | 8bbccb93cc9d854c199c2033051c63597949cfa6 (diff) |
add patch series
Diffstat (limited to 'debian')
4 files changed, 705 insertions, 0 deletions
diff --git a/debian/patches/fix-locking0001-vm-collapse-unreachable-branch-into-assertion.patch b/debian/patches/fix-locking0001-vm-collapse-unreachable-branch-into-assertion.patch new file mode 100644 index 0000000..de441e8 --- /dev/null +++ b/debian/patches/fix-locking0001-vm-collapse-unreachable-branch-into-assertion.patch @@ -0,0 +1,56 @@ +From ecf0b595a31a9e267ed8e67caeaafbeec054a840 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Mon, 17 Aug 2015 21:33:33 +0200 +Subject: [PATCH gnumach 1/3] vm: collapse unreachable branch into assertion + +* vm/vm_object.c (vm_object_collapse): Collapse unreachable branch +into assertion. +--- + vm/vm_object.c | 31 +++---------------------------- + 1 file changed, 3 insertions(+), 28 deletions(-) + +diff --git a/vm/vm_object.c b/vm/vm_object.c +index deac0c2..133181f 100644 +--- a/vm/vm_object.c ++++ b/vm/vm_object.c +@@ -2465,34 +2465,9 @@ void vm_object_collapse( + VM_PAGE_FREE(p); + } + else { +- if (pp != VM_PAGE_NULL) { +- /* +- * Parent has an absent page... +- * it's not being paged in, so +- * it must really be missing from +- * the parent. +- * +- * Throw out the absent page... +- * any faults looking for that +- * page will restart with the new +- * one. +- */ +- +- /* +- * This should never happen -- the +- * parent cannot have ever had an +- * external memory object, and thus +- * cannot have absent pages. +- */ +- panic("vm_object_collapse: bad case"); +- +- VM_PAGE_FREE(pp); +- +- /* +- * Fall through to move the backing +- * object's page up. +- */ +- } ++ assert(pp == VM_PAGE_NULL || ! ++ "vm_object_collapse: bad case"); ++ + /* + * Parent now has no page. + * Move the backing object's page up. 
+-- +2.1.4 + diff --git a/debian/patches/fix-locking0002-i386-fix-panic-message.patch b/debian/patches/fix-locking0002-i386-fix-panic-message.patch new file mode 100644 index 0000000..d21fb18 --- /dev/null +++ b/debian/patches/fix-locking0002-i386-fix-panic-message.patch @@ -0,0 +1,27 @@ +From 5d3ea1ab58d80f1e0c01a1a441c2847a1334a9ff Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Mon, 17 Aug 2015 21:34:25 +0200 +Subject: [PATCH gnumach 2/3] i386: fix panic message + +* i386/intel/pmap.c (pmap_page_protect): Fix function name in panic +message. +--- + i386/intel/pmap.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c +index dc9f360..5cfffb5 100644 +--- a/i386/intel/pmap.c ++++ b/i386/intel/pmap.c +@@ -1597,7 +1597,7 @@ void pmap_page_protect( + * Remove the mapping, collecting any modify bits. + */ + if (*pte & INTEL_PTE_WIRED) +- panic("pmap_remove_all removing a wired page"); ++ panic("pmap_page_protect removing a wired page"); + + { + int i = ptes_per_vm_page; +-- +2.1.4 + diff --git a/debian/patches/fix-locking0003-vm-fix-locking-issues.patch b/debian/patches/fix-locking0003-vm-fix-locking-issues.patch new file mode 100644 index 0000000..4e1e85e --- /dev/null +++ b/debian/patches/fix-locking0003-vm-fix-locking-issues.patch @@ -0,0 +1,619 @@ +From b2b5f7355a3293bc5d02a43f1341a889cabd93fa Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Sun, 16 Aug 2015 12:54:41 +0200 +Subject: [PATCH gnumach 3/3] vm: fix locking issues + +Avoid accessing fields of `vm_object' objects without having it +locked. These problems have been found using a code transformation +done by Coccinelle that instrumented all accesses with a runtime +check, and manual inspection. + +* vm/memory_object.c (memory_object_data_supply): Avoid accessing +fields without the lock. +* vm/vm_fault.c (vm_fault_page): Likewise. 
+* vm/vm_map.c (vm_map_submap): Properly lock `object'. +(vm_map_copy_overwrite): Avoid accessing fields without the lock. +(vm_map_copyin): Lock `src_object'. +* vm/vm_object.c (_vm_object_setup): Likewise. +(vm_object_allocate): Likewise. +(vm_object_terminate): Avoid accessing fields without the lock. +(vm_object_copy_slowly): Lock `new_object'. +(vm_object_copy_delayed): Lock `src_object' earlier, lock `new_copy'. +(vm_object_shadow): Lock `result'. +(vm_object_enter): Properly lock `object'. Avoid accessing fields +without the lock. +* vm/vm_pageout.c (vm_pageout_setup): Properly lock `old_object'. +--- + vm/memory_object.c | 17 +++++++---- + vm/vm_fault.c | 39 ++++++++++++++++++------ + vm/vm_map.c | 55 ++++++++++++++++++++++++---------- + vm/vm_object.c | 88 ++++++++++++++++++++++++++++++++++++++---------------- + vm/vm_pageout.c | 9 ++---- + 5 files changed, 145 insertions(+), 63 deletions(-) + +diff --git a/vm/memory_object.c b/vm/memory_object.c +index 097ed23..0a07429 100644 +--- a/vm/memory_object.c ++++ b/vm/memory_object.c +@@ -101,6 +101,7 @@ kern_return_t memory_object_data_supply( + vm_page_t *page_list; + boolean_t was_absent; + vm_map_copy_t orig_copy = data_copy; ++ pager_request_t pager_request; + + /* + * Look for bogus arguments +@@ -270,6 +271,7 @@ retry_lookup: + /* + * Send reply if one was requested. + */ ++ pager_request = object->pager_request; + vm_object_paging_end(object); + vm_object_unlock(object); + +@@ -279,7 +281,7 @@ retry_lookup: + if (IP_VALID(reply_to)) { + memory_object_supply_completed( + reply_to, reply_to_type, +- object->pager_request, ++ pager_request, + original_offset, + original_length, + result, +@@ -788,7 +790,9 @@ MACRO_END + continue; + + case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN: +- case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN: ++ case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:; ++ vm_offset_t object_paging_offset; ++ + /* + * The clean and return cases are similar. 
+ * +@@ -811,6 +815,7 @@ MACRO_END + PAGEOUT_PAGES; + } + ++ object_paging_offset = object->paging_offset; + vm_object_unlock(object); + + /* +@@ -821,8 +826,7 @@ MACRO_END + if (new_object == VM_OBJECT_NULL) { + new_object = vm_object_allocate(original_size); + new_offset = 0; +- paging_offset = m->offset + +- object->paging_offset; ++ paging_offset = m->offset + object_paging_offset; + pageout_action = page_lock_result; + } + +@@ -831,7 +835,7 @@ MACRO_END + * new object. + */ + m = vm_pageout_setup(m, +- m->offset + object->paging_offset, ++ m->offset + object_paging_offset, + new_object, + new_offset, + should_flush); +@@ -859,11 +863,12 @@ MACRO_END + } + + if (IP_VALID(reply_to)) { ++ pager_request_t pager_request = object->pager_request; + vm_object_unlock(object); + + /* consumes our naked send-once/send right for reply_to */ + (void) memory_object_lock_completed(reply_to, reply_to_type, +- object->pager_request, original_offset, original_size); ++ pager_request, original_offset, original_size); + + vm_object_lock(object); + } +diff --git a/vm/vm_fault.c b/vm/vm_fault.c +index 46779f6..101ebce 100644 +--- a/vm/vm_fault.c ++++ b/vm/vm_fault.c +@@ -229,6 +229,17 @@ vm_fault_return_t vm_fault_page( + boolean_t look_for_page; + vm_prot_t access_required; + ++ /* We need to unlock an object before making requests to a ++ memory manager. We use this object to temporarily store ++ object attributes needed for the request to avoid accessing ++ the object while it is unlocked. 
*/ ++ struct ++ { ++ struct ipc_port * pager; ++ pager_request_t pager_request; ++ vm_offset_t paging_offset; ++ } obj; ++ + if (resume) { + vm_fault_state_t *state = + (vm_fault_state_t *) current_thread()->ith_other; +@@ -510,11 +521,16 @@ vm_fault_return_t vm_fault_page( + + new_unlock_request = m->unlock_request = + (access_required | m->unlock_request); ++ obj.pager = object->pager; ++ obj.pager_request = ++ object->pager_request; ++ obj.paging_offset = ++ object->paging_offset; + vm_object_unlock(object); + if ((rc = memory_object_data_unlock( +- object->pager, +- object->pager_request, +- offset + object->paging_offset, ++ obj.pager, ++ obj.pager_request, ++ offset + obj.paging_offset, + PAGE_SIZE, + new_unlock_request)) + != KERN_SUCCESS) { +@@ -633,6 +649,11 @@ vm_fault_return_t vm_fault_page( + m->absent = TRUE; + object->absent_count++; + ++ /* Save attributes for the request. */ ++ obj.pager = object->pager; ++ obj.pager_request = object->pager_request; ++ obj.paging_offset = object->paging_offset; ++ + /* + * We have a busy page, so we can + * release the object lock. 
+@@ -647,16 +668,16 @@ vm_fault_return_t vm_fault_page( + vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS); + current_task()->pageins++; + +- if ((rc = memory_object_data_request(object->pager, +- object->pager_request, +- m->offset + object->paging_offset, ++ if ((rc = memory_object_data_request(obj.pager, ++ obj.pager_request, ++ m->offset + obj.paging_offset, + PAGE_SIZE, access_required)) != KERN_SUCCESS) { + if (rc != MACH_SEND_INTERRUPTED) + printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n", + "memory_object_data_request", +- object->pager, +- object->pager_request, +- m->offset + object->paging_offset, ++ obj.pager, ++ obj.pager_request, ++ m->offset + obj.paging_offset, + PAGE_SIZE, access_required, rc); + /* + * Don't want to leave a busy page around, +diff --git a/vm/vm_map.c b/vm/vm_map.c +index ae3ce21..9098dfd 100644 +--- a/vm/vm_map.c ++++ b/vm/vm_map.c +@@ -1182,16 +1182,20 @@ kern_return_t vm_map_submap( + + if ((entry->vme_start == start) && (entry->vme_end == end) && + (!entry->is_sub_map) && +- ((object = entry->object.vm_object) == vm_submap_object) && +- (object->resident_page_count == 0) && +- (object->copy == VM_OBJECT_NULL) && +- (object->shadow == VM_OBJECT_NULL) && +- (!object->pager_created)) { +- entry->object.vm_object = VM_OBJECT_NULL; +- vm_object_deallocate(object); +- entry->is_sub_map = TRUE; +- vm_map_reference(entry->object.sub_map = submap); +- result = KERN_SUCCESS; ++ ((object = entry->object.vm_object) == vm_submap_object)) { ++ vm_object_lock(object); ++ if ((object->resident_page_count == 0) && ++ (object->copy == VM_OBJECT_NULL) && ++ (object->shadow == VM_OBJECT_NULL) && ++ (!object->pager_created)) { ++ vm_object_unlock(object); ++ entry->object.vm_object = VM_OBJECT_NULL; ++ vm_object_deallocate(object); ++ entry->is_sub_map = TRUE; ++ vm_map_reference(entry->object.sub_map = submap); ++ result = KERN_SUCCESS; ++ } else ++ vm_object_unlock(object); + } + vm_map_unlock(map); + +@@ -2122,6 +2126,7 @@ start_pass_1: + for 
(entry = tmp_entry;;) { + vm_size_t sub_size = (entry->vme_end - entry->vme_start); + vm_map_entry_t next = entry->vme_next; ++ vm_object_t object; + + if ( ! (entry->protection & VM_PROT_WRITE)) { + vm_map_unlock(dst_map); +@@ -2157,10 +2162,13 @@ start_pass_1: + /* + * Check for permanent objects in the destination. + */ +- +- if ((entry->object.vm_object != VM_OBJECT_NULL) && +- !entry->object.vm_object->temporary) +- contains_permanent_objects = TRUE; ++ object = entry->object.vm_object; ++ if ((object != VM_OBJECT_NULL) ++ && ! contains_permanent_objects) { ++ vm_object_lock(object); ++ contains_permanent_objects = ! object->temporary; ++ vm_object_unlock(object); ++ } + + size -= sub_size; + entry = next; +@@ -2220,6 +2228,7 @@ start_pass_1: + vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy); + vm_size_t copy_size = (copy_entry->vme_end - copy_entry->vme_start); + vm_object_t object; ++ int temporary; + + entry = tmp_entry; + size = (entry->vme_end - entry->vme_start); +@@ -2275,8 +2284,15 @@ start_pass_1: + */ + + object = entry->object.vm_object; ++ temporary = 0; ++ if (object != VM_OBJECT_NULL) { ++ vm_object_lock(object); ++ temporary = object->temporary; ++ vm_object_unlock(object); ++ } ++ + if (!entry->is_shared && +- ((object == VM_OBJECT_NULL) || object->temporary)) { ++ ((object == VM_OBJECT_NULL) || temporary)) { + vm_object_t old_object = entry->object.vm_object; + vm_offset_t old_offset = entry->offset; + +@@ -3219,11 +3235,15 @@ kern_return_t vm_map_copyin( + /* + * Attempt non-blocking copy-on-write optimizations. 
+ */ +- ++ if (src_object) ++ vm_object_lock(src_object); + if (src_destroy && + (src_object == VM_OBJECT_NULL || + (src_object->temporary && !src_object->use_shared_copy))) + { ++ if (src_object) ++ vm_object_unlock(src_object); ++ + /* + * If we are destroying the source, and the object + * is temporary, and not shared writable, +@@ -3243,6 +3263,9 @@ kern_return_t vm_map_copyin( + goto CopySuccessful; + } + ++ if (src_object) ++ vm_object_unlock(src_object); ++ + if (!was_wired && + vm_object_copy_temporary( + &new_entry->object.vm_object, +diff --git a/vm/vm_object.c b/vm/vm_object.c +index 133181f..36dbd8b 100644 +--- a/vm/vm_object.c ++++ b/vm/vm_object.c +@@ -217,9 +217,11 @@ static void _vm_object_setup( + vm_size_t size) + { + *object = vm_object_template; +- queue_init(&object->memq); + vm_object_lock_init(object); ++ vm_object_lock(object); ++ queue_init(&object->memq); + object->size = size; ++ vm_object_unlock(object); + } + + vm_object_t _vm_object_allocate( +@@ -244,8 +246,11 @@ vm_object_t vm_object_allocate( + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("vm_object_allocate"); ++ ++ vm_object_lock(object); + object->pager_name = port; + ipc_kobject_set(port, (ipc_kobject_t) object, IKOT_PAGING_NAME); ++ vm_object_unlock(object); + + return object; + } +@@ -540,6 +545,12 @@ void vm_object_terminate( + { + vm_page_t p; + vm_object_t shadow_object; ++ struct ipc_port *pager; ++ pager_request_t pager_request; ++ struct ipc_port *pager_name; ++#if MACH_PAGEMAP ++ vm_external_t existence_info; ++#endif /* MACH_PAGEMAP */ + + /* + * Make sure the object isn't already being terminated +@@ -637,20 +648,26 @@ void vm_object_terminate( + * using memory_object_terminate. + */ + ++ /* Copy attributes while object is locked. 
*/ ++ pager = object->pager; ++ pager_request = object->pager_request; ++ pager_name = object->pager_name; ++#if MACH_PAGEMAP ++ existence_info = object->existence_info; ++#endif /* MACH_PAGEMAP */ ++ + vm_object_unlock(object); + +- if (object->pager != IP_NULL) { ++ if (pager != IP_NULL) { + /* consumes our rights for pager, pager_request, pager_name */ +- memory_object_release(object->pager, +- object->pager_request, +- object->pager_name); +- } else if (object->pager_name != IP_NULL) { ++ memory_object_release(pager, pager_request, pager_name); ++ } else if (pager_name != IP_NULL) { + /* consumes our right for pager_name */ +- ipc_port_dealloc_kernel(object->pager_name); ++ ipc_port_dealloc_kernel(pager_name); + } + + #if MACH_PAGEMAP +- vm_external_destroy(object->existence_info); ++ vm_external_destroy(existence_info); + #endif /* MACH_PAGEMAP */ + + /* +@@ -1080,6 +1097,7 @@ kern_return_t vm_object_copy_slowly( + */ + + new_object = vm_object_allocate(size); ++ vm_object_lock(new_object); + new_offset = 0; + + assert(size == trunc_page(size)); /* Will the loop terminate? */ +@@ -1171,6 +1189,7 @@ kern_return_t vm_object_copy_slowly( + + case VM_FAULT_INTERRUPTED: + vm_page_free(new_page); ++ vm_object_unlock(new_object); + vm_object_deallocate(new_object); + vm_object_deallocate(src_object); + *_result_object = VM_OBJECT_NULL; +@@ -1186,6 +1205,7 @@ kern_return_t vm_object_copy_slowly( + */ + + vm_page_free(new_page); ++ vm_object_unlock(new_object); + vm_object_deallocate(new_object); + vm_object_deallocate(src_object); + *_result_object = VM_OBJECT_NULL; +@@ -1200,6 +1220,7 @@ kern_return_t vm_object_copy_slowly( + + vm_object_deallocate(src_object); + *_result_object = new_object; ++ vm_object_unlock(new_object); + return KERN_SUCCESS; + } + +@@ -1474,14 +1495,11 @@ vm_object_t vm_object_copy_delayed( + * must be done carefully, to avoid deadlock. + */ + +- /* +- * Allocate a new copy object before locking, even +- * though we may not need it later. 
+- */ ++ vm_object_lock(src_object); + + new_copy = vm_object_allocate(src_object->size); + +- vm_object_lock(src_object); ++ vm_object_lock(new_copy); + + /* + * See whether we can reuse the result of a previous +@@ -1519,7 +1537,7 @@ vm_object_t vm_object_copy_delayed( + old_copy->ref_count++; + vm_object_unlock(old_copy); + vm_object_unlock(src_object); +- ++ vm_object_unlock(new_copy); + vm_object_deallocate(new_copy); + + return old_copy; +@@ -1574,7 +1592,7 @@ vm_object_t vm_object_copy_delayed( + } + + vm_object_unlock(src_object); +- ++ vm_object_unlock(new_copy); + return new_copy; + } + +@@ -1711,6 +1729,8 @@ void vm_object_shadow( + if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) + panic("vm_object_shadow: no object for shadowing"); + ++ vm_object_lock(result); ++ + /* + * The new object shadows the source object, adding + * a reference to it. Our caller changes his reference +@@ -1733,6 +1753,7 @@ void vm_object_shadow( + + *offset = 0; + *object = result; ++ vm_object_unlock(result); + } + + /* +@@ -2053,8 +2074,10 @@ restart: + object = (po == IKOT_PAGER) ? (vm_object_t) pager->ip_kobject + : VM_OBJECT_NULL; + +- if ((object != VM_OBJECT_NULL) && !must_init) { ++ if (object != VM_OBJECT_NULL) + vm_object_lock(object); ++ ++ if ((object != VM_OBJECT_NULL) && !must_init) { + if (object->ref_count == 0) { + queue_remove(&vm_object_cached_list, object, + vm_object_t, cached_list); +@@ -2062,10 +2085,9 @@ restart: + vm_object_cached_pages_update(-object->resident_page_count); + } + object->ref_count++; +- vm_object_unlock(object); +- + vm_stat.hits++; + } ++ + assert((object == VM_OBJECT_NULL) || (object->ref_count > 0) || + ((object->paging_in_progress != 0) && internal)); + +@@ -2085,6 +2107,10 @@ restart: + return(object); + + if (must_init) { ++ vm_size_t pager_size; ++ pager_request_t pager_request; ++ struct ipc_port *pager_name; ++ + /* + * Copy the naked send right we were given. 
+ */ +@@ -2112,6 +2138,11 @@ restart: + * Let the pager know we're using it. + */ + ++ /* Store attributes while we're holding the lock. */ ++ pager_size = object->size; ++ pager_request = object->pager_request; ++ pager_name = object->pager_name; ++ + if (internal) { + /* acquire a naked send right for the DMM */ + ipc_port_t DMM = memory_manager_default_reference(); +@@ -2123,12 +2154,15 @@ restart: + /* default-pager objects are ready immediately */ + object->pager_ready = TRUE; + ++ /* Unlock object across call to memory manager. */ ++ vm_object_unlock(object); ++ + /* consumes the naked send right for DMM */ + (void) memory_object_create(DMM, + pager, +- object->size, +- object->pager_request, +- object->pager_name, ++ pager_size, ++ pager_request, ++ pager_name, + PAGE_SIZE); + } else { + /* the object is external and not temporary */ +@@ -2138,13 +2172,16 @@ restart: + /* user pager objects are not ready until marked so */ + object->pager_ready = FALSE; + ++ /* Unlock object across call to memory manager. */ ++ vm_object_unlock(object); ++ + (void) memory_object_init(pager, +- object->pager_request, +- object->pager_name, ++ pager_request, ++ pager_name, + PAGE_SIZE); +- + } + ++ /* Object was unlocked across call to memory manager. 
*/ + vm_object_lock(object); + object->pager_initialized = TRUE; + +@@ -2152,9 +2189,8 @@ restart: + object->pager_ready = TRUE; + + vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED); +- } else { +- vm_object_lock(object); + } ++ + /* + * [At this point, the object must be locked] + */ +diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c +index 51a6a0d..b676c7b 100644 +--- a/vm/vm_pageout.c ++++ b/vm/vm_pageout.c +@@ -252,6 +252,8 @@ vm_pageout_setup( + vm_object_unlock(new_object); + } + ++ vm_object_lock(old_object); ++ + if (flush) { + /* + * Create a place-holder page where the old one was, +@@ -262,7 +264,6 @@ vm_pageout_setup( + == VM_PAGE_NULL) + vm_page_more_fictitious(); + +- vm_object_lock(old_object); + vm_page_lock_queues(); + vm_page_remove(m); + vm_page_unlock_queues(); +@@ -281,8 +282,6 @@ vm_pageout_setup( + VM_EXTERNAL_STATE_EXISTS); + #endif /* MACH_PAGEMAP */ + +- vm_object_unlock(old_object); +- + vm_object_lock(new_object); + + /* +@@ -305,7 +304,6 @@ vm_pageout_setup( + */ + vm_page_copy(m, new_m); + +- vm_object_lock(old_object); + m->dirty = FALSE; + pmap_clear_modify(m->phys_addr); + +@@ -328,8 +326,6 @@ vm_pageout_setup( + VM_EXTERNAL_STATE_EXISTS); + #endif /* MACH_PAGEMAP */ + +- vm_object_unlock(old_object); +- + vm_object_lock(new_object); + + /* +@@ -383,6 +379,7 @@ vm_pageout_setup( + */ + + vm_object_unlock(new_object); ++ vm_object_unlock(old_object); + + /* + * Return the placeholder page to simplify cleanup. 
+-- +2.1.4 + diff --git a/debian/patches/series b/debian/patches/series index 029e46a..014c6d8 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -13,3 +13,6 @@ sysenter0001-yyy-sysenter-prototype.patch vm-cache-policy0001-VM-cache-policy-change.patch vm-cache-policy0002-vm-keep-track-of-clean-pages.patch vm-cache-policy0003-vm-evict-clean-pages-first.patch +fix-locking0001-vm-collapse-unreachable-branch-into-assertion.patch +fix-locking0002-i386-fix-panic-message.patch +fix-locking0003-vm-fix-locking-issues.patch |