author     Justus Winter <4winter@informatik.uni-hamburg.de>  2015-08-20 21:50:30 +0200
committer  Justus Winter <4winter@informatik.uni-hamburg.de>  2015-08-20 21:50:30 +0200
commit     50e3ba8bab9f1327d369e80d49f2d23aaa3efcf9 (patch)
tree       9fbcdbfcc03fcc10ccc44fcee347de83238767cc
parent     e54b33e168da134076028212adea3300b169ef1f (diff)
add patch series
-rw-r--r--  debian/patches/fix-locking0001-kern-really-zero-out-unused-simple-lock-info-entries.patch  |  29
-rw-r--r--  debian/patches/fix-locking0002-i386-enable-assertions.patch  |  59
-rw-r--r--  debian/patches/fix-locking0003-kern-bootstrap-tune-locking.patch  |  30
-rw-r--r--  debian/patches/fix-locking0004-vm-fix-locking-issues.patch  | 619
-rw-r--r--  debian/patches/fix-locking0005-ipc-fix-locking-issues.patch  | 268
-rw-r--r--  debian/patches/series  |   5
6 files changed, 1010 insertions, 0 deletions
diff --git a/debian/patches/fix-locking0001-kern-really-zero-out-unused-simple-lock-info-entries.patch b/debian/patches/fix-locking0001-kern-really-zero-out-unused-simple-lock-info-entries.patch
new file mode 100644
index 0000000..ff635a2
--- /dev/null
+++ b/debian/patches/fix-locking0001-kern-really-zero-out-unused-simple-lock-info-entries.patch
@@ -0,0 +1,29 @@
+From 435cc1becf334f650dcdd0763cb0022f27f3f54a Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Wed, 19 Aug 2015 15:57:54 +0200
+Subject: [PATCH gnumach 1/5] kern: really zero-out unused simple lock info
+ entries
+
+* kern/lock.c (simple_unlock): Really zero-out unused simple lock info
+entries.
+---
+ kern/lock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kern/lock.c b/kern/lock.c
+index 46c78da..1daf1b4 100644
+--- a/kern/lock.c
++++ b/kern/lock.c
+@@ -219,8 +219,8 @@ void simple_unlock(
+
+ simple_locks_info[i] = simple_locks_info[simple_locks_taken-1];
+ }
+- simple_locks_info[simple_locks_taken] = (struct simple_locks_info) {0};
+ simple_locks_taken--;
++ simple_locks_info[simple_locks_taken] = (struct simple_locks_info) {0};
+ }
+
+ #endif /* MACH_SLOCKS && NCPUS == 1 */
+--
+2.1.4
+
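The hunk above keeps the swap-with-last bookkeeping for simple_locks_info[] but moves the clearing to after the decrement: before the fix, index simple_locks_taken was zeroed while it was still one past the last live entry, so the slot that had actually just been vacated kept its stale contents. Below is a minimal standalone C sketch of the fixed ordering; lock_info, info, taken, push and pop_fixed are illustrative stand-ins, not gnumach code.

/*
 * Standalone sketch (not gnumach code) of the ordering fixed above.
 * Build with: cc -std=c99 sketch.c
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct lock_info { const void *lock; unsigned pc; };

static struct lock_info info[8];   /* stands in for simple_locks_info[] */
static unsigned taken;             /* stands in for simple_locks_taken  */

static void push(const void *l, unsigned pc)
{
    info[taken].lock = l;
    info[taken].pc = pc;
    taken++;
}

static void pop_fixed(void)
{
    /* Decrement first, then clear the slot that just became unused.
     * The old code cleared info[taken] *before* the decrement, i.e. a
     * slot that was already unused, and left this one stale. */
    taken--;
    info[taken] = (struct lock_info) {0};
}

int main(void)
{
    int dummy;

    push(&dummy, 0x1234);
    pop_fixed();
    assert(info[0].lock == NULL && info[0].pc == 0);
    printf("released slot really cleared\n");
    return 0;
}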
diff --git a/debian/patches/fix-locking0002-i386-enable-assertions.patch b/debian/patches/fix-locking0002-i386-enable-assertions.patch
new file mode 100644
index 0000000..271ad08
--- /dev/null
+++ b/debian/patches/fix-locking0002-i386-enable-assertions.patch
@@ -0,0 +1,59 @@
+From cf2eb5d84928952dbef9f9388139599d54217dab Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Wed, 19 Aug 2015 16:22:25 +0200
+Subject: [PATCH gnumach 2/5] i386: enable assertions
+
+* i386/intel/pmap.c (pmap_page_protect): Enable assertions.
+(phys_attribute_clear, phys_attribute_test): Likewise.
+---
+ i386/intel/pmap.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
+index 5cfffb5..cf7a736 100644
+--- a/i386/intel/pmap.c
++++ b/i386/intel/pmap.c
+@@ -1585,8 +1585,8 @@ void pmap_page_protect(
+ /*
+ * Consistency checks.
+ */
+- /* assert(*pte & INTEL_PTE_VALID); XXX */
+- /* assert(pte_to_phys(*pte) == phys); */
++ assert(*pte & INTEL_PTE_VALID);
++ assert(pte_to_pa(*pte) == phys);
+
+ /*
+ * Remove the mapping if new protection is NONE
+@@ -2463,13 +2463,11 @@ phys_attribute_clear(
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+-#if 0
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & INTEL_PTE_VALID);
+- /* assert(pte_to_phys(*pte) == phys); */
+-#endif
++ assert(pte_to_pa(*pte) == phys);
+
+ /*
+ * Clear modify or reference bits.
+@@ -2556,13 +2554,11 @@ phys_attribute_test(
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+-#if 0
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & INTEL_PTE_VALID);
+- /* assert(pte_to_phys(*pte) == phys); */
+-#endif
++ assert(pte_to_pa(*pte) == phys);
+ }
+
+ /*
+--
+2.1.4
+
diff --git a/debian/patches/fix-locking0003-kern-bootstrap-tune-locking.patch b/debian/patches/fix-locking0003-kern-bootstrap-tune-locking.patch
new file mode 100644
index 0000000..990a31f
--- /dev/null
+++ b/debian/patches/fix-locking0003-kern-bootstrap-tune-locking.patch
@@ -0,0 +1,30 @@
+From 0bae7342bf2275a210117bbcc4fa5a13523eaa81 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 25 Jul 2015 02:15:31 +0200
+Subject: [PATCH gnumach 3/5] kern/bootstrap: tune locking
+
+* kern/bootstrap.c (boot_script_exec_cmd): Avoid holding the lock
+across the call to `thread_create'.
+---
+ kern/bootstrap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kern/bootstrap.c b/kern/bootstrap.c
+index cb0f976..aad0fb1 100644
+--- a/kern/bootstrap.c
++++ b/kern/bootstrap.c
+@@ -719,10 +719,10 @@ boot_script_exec_cmd (void *hook, task_t task, char *path, int argc,
+ thread_t thread;
+ struct user_bootstrap_info info = { mod, argv, 0, };
+ simple_lock_init (&info.lock);
+- simple_lock (&info.lock);
+
+ err = thread_create ((task_t)task, &thread);
+ assert(err == 0);
++ simple_lock (&info.lock);
+ thread->saved.other = &info;
+ thread_start (thread, user_bootstrap);
+ err = thread_resume (thread);
+--
+2.1.4
+
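The point of the reordering above is to keep the allocating, potentially blocking thread_create() call outside the section covered by info.lock; the lock is presumably only needed from the moment the bootstrap info is handed to the new thread, which still happens before thread_start(). The standalone sketch below illustrates that discipline with toy stand-ins (simple_lock(), thread_create() and locks_held are illustrative, not the kernel primitives).

/*
 * Standalone sketch (not gnumach code) of the reordering in this patch.
 * Build with: cc sketch.c
 */
#include <assert.h>
#include <stdio.h>

static int locks_held;             /* number of "simple locks" we hold */

static void simple_lock(void)   { locks_held++; }
static void simple_unlock(void) { locks_held--; }

static void thread_create(void)
{
    /* The real call allocates the (still suspended) thread and may
     * block; a spinning lock should not be held across it. */
    assert(locks_held == 0);
    printf("thread created with no simple locks held\n");
}

int main(void)
{
    /* Fixed ordering: create first, then lock just before publishing
     * the bootstrap info and starting the thread. */
    thread_create();
    simple_lock();
    /* thread->saved.other = &info;  thread_start(...);  ... */
    simple_unlock();
    return 0;
}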
diff --git a/debian/patches/fix-locking0004-vm-fix-locking-issues.patch b/debian/patches/fix-locking0004-vm-fix-locking-issues.patch
new file mode 100644
index 0000000..9a592ae
--- /dev/null
+++ b/debian/patches/fix-locking0004-vm-fix-locking-issues.patch
@@ -0,0 +1,619 @@
+From 85ec6f573feb5f2564e1b1ce0064f829c9790d6f Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sun, 16 Aug 2015 12:54:41 +0200
+Subject: [PATCH gnumach 4/5] vm: fix locking issues
+
+Avoid accessing fields of `vm_object' objects without having it
+locked. These problems have been found using a code transformation
+done by Coccinelle that instrumented all accesses with a runtime
+check, and manual inspection.
+
+* vm/memory_object.c (memory_object_data_supply): Avoid accessing
+fields without the lock.
+* vm/vm_fault.c (vm_fault_page): Likewise.
+* vm/vm_map.c (vm_map_submap): Properly lock `object'.
+(vm_map_copy_overwrite): Avoid accessing fields without the lock.
+(vm_map_copyin): Lock `src_object'.
+* vm/vm_object.c (_vm_object_setup): Likewise.
+(vm_object_allocate): Likewise.
+(vm_object_terminate): Avoid accessing fields without the lock.
+(vm_object_copy_slowly): Lock `new_object'.
+(vm_object_copy_delayed): Lock `src_object' earlier, lock `new_copy'.
+(vm_object_shadow): Lock `result'.
+(vm_object_enter): Properly lock `object'. Avoid accessing fields
+without the lock.
+* vm/vm_pageout.c (vm_pageout_setup): Properly lock `old_object'.
+---
+ vm/memory_object.c | 17 +++++++----
+ vm/vm_fault.c | 39 ++++++++++++++++++------
+ vm/vm_map.c | 55 ++++++++++++++++++++++++----------
+ vm/vm_object.c | 88 ++++++++++++++++++++++++++++++++++++++----------------
+ vm/vm_pageout.c | 9 ++----
+ 5 files changed, 145 insertions(+), 63 deletions(-)
+
+diff --git a/vm/memory_object.c b/vm/memory_object.c
+index 097ed23..0a07429 100644
+--- a/vm/memory_object.c
++++ b/vm/memory_object.c
+@@ -101,6 +101,7 @@ kern_return_t memory_object_data_supply(
+ vm_page_t *page_list;
+ boolean_t was_absent;
+ vm_map_copy_t orig_copy = data_copy;
++ pager_request_t pager_request;
+
+ /*
+ * Look for bogus arguments
+@@ -270,6 +271,7 @@ retry_lookup:
+ /*
+ * Send reply if one was requested.
+ */
++ pager_request = object->pager_request;
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+@@ -279,7 +281,7 @@ retry_lookup:
+ if (IP_VALID(reply_to)) {
+ memory_object_supply_completed(
+ reply_to, reply_to_type,
+- object->pager_request,
++ pager_request,
+ original_offset,
+ original_length,
+ result,
+@@ -788,7 +790,9 @@ MACRO_END
+ continue;
+
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
+- case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
++ case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:;
++ vm_offset_t object_paging_offset;
++
+ /*
+ * The clean and return cases are similar.
+ *
+@@ -811,6 +815,7 @@ MACRO_END
+ PAGEOUT_PAGES;
+ }
+
++ object_paging_offset = object->paging_offset;
+ vm_object_unlock(object);
+
+ /*
+@@ -821,8 +826,7 @@ MACRO_END
+ if (new_object == VM_OBJECT_NULL) {
+ new_object = vm_object_allocate(original_size);
+ new_offset = 0;
+- paging_offset = m->offset +
+- object->paging_offset;
++ paging_offset = m->offset + object_paging_offset;
+ pageout_action = page_lock_result;
+ }
+
+@@ -831,7 +835,7 @@ MACRO_END
+ * new object.
+ */
+ m = vm_pageout_setup(m,
+- m->offset + object->paging_offset,
++ m->offset + object_paging_offset,
+ new_object,
+ new_offset,
+ should_flush);
+@@ -859,11 +863,12 @@ MACRO_END
+ }
+
+ if (IP_VALID(reply_to)) {
++ pager_request_t pager_request = object->pager_request;
+ vm_object_unlock(object);
+
+ /* consumes our naked send-once/send right for reply_to */
+ (void) memory_object_lock_completed(reply_to, reply_to_type,
+- object->pager_request, original_offset, original_size);
++ pager_request, original_offset, original_size);
+
+ vm_object_lock(object);
+ }
+diff --git a/vm/vm_fault.c b/vm/vm_fault.c
+index 46779f6..101ebce 100644
+--- a/vm/vm_fault.c
++++ b/vm/vm_fault.c
+@@ -229,6 +229,17 @@ vm_fault_return_t vm_fault_page(
+ boolean_t look_for_page;
+ vm_prot_t access_required;
+
++ /* We need to unlock an object before making requests to a
++ memory manager. We use this object to temporarily store
++ object attributes needed for the request to avoid accessing
++ the object while it is unlocked. */
++ struct
++ {
++ struct ipc_port * pager;
++ pager_request_t pager_request;
++ vm_offset_t paging_offset;
++ } obj;
++
+ if (resume) {
+ vm_fault_state_t *state =
+ (vm_fault_state_t *) current_thread()->ith_other;
+@@ -510,11 +521,16 @@ vm_fault_return_t vm_fault_page(
+
+ new_unlock_request = m->unlock_request =
+ (access_required | m->unlock_request);
++ obj.pager = object->pager;
++ obj.pager_request =
++ object->pager_request;
++ obj.paging_offset =
++ object->paging_offset;
+ vm_object_unlock(object);
+ if ((rc = memory_object_data_unlock(
+- object->pager,
+- object->pager_request,
+- offset + object->paging_offset,
++ obj.pager,
++ obj.pager_request,
++ offset + obj.paging_offset,
+ PAGE_SIZE,
+ new_unlock_request))
+ != KERN_SUCCESS) {
+@@ -633,6 +649,11 @@ vm_fault_return_t vm_fault_page(
+ m->absent = TRUE;
+ object->absent_count++;
+
++ /* Save attributes for the request. */
++ obj.pager = object->pager;
++ obj.pager_request = object->pager_request;
++ obj.paging_offset = object->paging_offset;
++
+ /*
+ * We have a busy page, so we can
+ * release the object lock.
+@@ -647,16 +668,16 @@ vm_fault_return_t vm_fault_page(
+ vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
+ current_task()->pageins++;
+
+- if ((rc = memory_object_data_request(object->pager,
+- object->pager_request,
+- m->offset + object->paging_offset,
++ if ((rc = memory_object_data_request(obj.pager,
++ obj.pager_request,
++ m->offset + obj.paging_offset,
+ PAGE_SIZE, access_required)) != KERN_SUCCESS) {
+ if (rc != MACH_SEND_INTERRUPTED)
+ printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
+ "memory_object_data_request",
+- object->pager,
+- object->pager_request,
+- m->offset + object->paging_offset,
++ obj.pager,
++ obj.pager_request,
++ m->offset + obj.paging_offset,
+ PAGE_SIZE, access_required, rc);
+ /*
+ * Don't want to leave a busy page around,
+diff --git a/vm/vm_map.c b/vm/vm_map.c
+index ae3ce21..9098dfd 100644
+--- a/vm/vm_map.c
++++ b/vm/vm_map.c
+@@ -1182,16 +1182,20 @@ kern_return_t vm_map_submap(
+
+ if ((entry->vme_start == start) && (entry->vme_end == end) &&
+ (!entry->is_sub_map) &&
+- ((object = entry->object.vm_object) == vm_submap_object) &&
+- (object->resident_page_count == 0) &&
+- (object->copy == VM_OBJECT_NULL) &&
+- (object->shadow == VM_OBJECT_NULL) &&
+- (!object->pager_created)) {
+- entry->object.vm_object = VM_OBJECT_NULL;
+- vm_object_deallocate(object);
+- entry->is_sub_map = TRUE;
+- vm_map_reference(entry->object.sub_map = submap);
+- result = KERN_SUCCESS;
++ ((object = entry->object.vm_object) == vm_submap_object)) {
++ vm_object_lock(object);
++ if ((object->resident_page_count == 0) &&
++ (object->copy == VM_OBJECT_NULL) &&
++ (object->shadow == VM_OBJECT_NULL) &&
++ (!object->pager_created)) {
++ vm_object_unlock(object);
++ entry->object.vm_object = VM_OBJECT_NULL;
++ vm_object_deallocate(object);
++ entry->is_sub_map = TRUE;
++ vm_map_reference(entry->object.sub_map = submap);
++ result = KERN_SUCCESS;
++ } else
++ vm_object_unlock(object);
+ }
+ vm_map_unlock(map);
+
+@@ -2122,6 +2126,7 @@ start_pass_1:
+ for (entry = tmp_entry;;) {
+ vm_size_t sub_size = (entry->vme_end - entry->vme_start);
+ vm_map_entry_t next = entry->vme_next;
++ vm_object_t object;
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+@@ -2157,10 +2162,13 @@ start_pass_1:
+ /*
+ * Check for permanent objects in the destination.
+ */
+-
+- if ((entry->object.vm_object != VM_OBJECT_NULL) &&
+- !entry->object.vm_object->temporary)
+- contains_permanent_objects = TRUE;
++ object = entry->object.vm_object;
++ if ((object != VM_OBJECT_NULL)
++ && ! contains_permanent_objects) {
++ vm_object_lock(object);
++ contains_permanent_objects = object->temporary;
++ vm_object_unlock(object);
++ }
+
+ size -= sub_size;
+ entry = next;
+@@ -2220,6 +2228,7 @@ start_pass_1:
+ vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
+ vm_size_t copy_size = (copy_entry->vme_end - copy_entry->vme_start);
+ vm_object_t object;
++ int temporary;
+
+ entry = tmp_entry;
+ size = (entry->vme_end - entry->vme_start);
+@@ -2275,8 +2284,15 @@ start_pass_1:
+ */
+
+ object = entry->object.vm_object;
++ temporary = 0;
++ if (object != VM_OBJECT_NULL) {
++ vm_object_lock(object);
++ temporary = object->temporary;
++ vm_object_unlock(object);
++ }
++
+ if (!entry->is_shared &&
+- ((object == VM_OBJECT_NULL) || object->temporary)) {
++ ((object == VM_OBJECT_NULL) || temporary)) {
+ vm_object_t old_object = entry->object.vm_object;
+ vm_offset_t old_offset = entry->offset;
+
+@@ -3219,11 +3235,15 @@ kern_return_t vm_map_copyin(
+ /*
+ * Attempt non-blocking copy-on-write optimizations.
+ */
+-
++ if (src_object)
++ vm_object_lock(src_object);
+ if (src_destroy &&
+ (src_object == VM_OBJECT_NULL ||
+ (src_object->temporary && !src_object->use_shared_copy)))
+ {
++ if (src_object)
++ vm_object_unlock(src_object);
++
+ /*
+ * If we are destroying the source, and the object
+ * is temporary, and not shared writable,
+@@ -3243,6 +3263,9 @@ kern_return_t vm_map_copyin(
+ goto CopySuccessful;
+ }
+
++ if (src_object)
++ vm_object_unlock(src_object);
++
+ if (!was_wired &&
+ vm_object_copy_temporary(
+ &new_entry->object.vm_object,
+diff --git a/vm/vm_object.c b/vm/vm_object.c
+index 133181f..36dbd8b 100644
+--- a/vm/vm_object.c
++++ b/vm/vm_object.c
+@@ -217,9 +217,11 @@ static void _vm_object_setup(
+ vm_size_t size)
+ {
+ *object = vm_object_template;
+- queue_init(&object->memq);
+ vm_object_lock_init(object);
++ vm_object_lock(object);
++ queue_init(&object->memq);
+ object->size = size;
++ vm_object_unlock(object);
+ }
+
+ vm_object_t _vm_object_allocate(
+@@ -244,8 +246,11 @@ vm_object_t vm_object_allocate(
+ port = ipc_port_alloc_kernel();
+ if (port == IP_NULL)
+ panic("vm_object_allocate");
++
++ vm_object_lock(object);
+ object->pager_name = port;
+ ipc_kobject_set(port, (ipc_kobject_t) object, IKOT_PAGING_NAME);
++ vm_object_unlock(object);
+
+ return object;
+ }
+@@ -540,6 +545,12 @@ void vm_object_terminate(
+ {
+ vm_page_t p;
+ vm_object_t shadow_object;
++ struct ipc_port *pager;
++ pager_request_t pager_request;
++ struct ipc_port *pager_name;
++#if MACH_PAGEMAP
++ vm_external_t existence_info;
++#endif /* MACH_PAGEMAP */
+
+ /*
+ * Make sure the object isn't already being terminated
+@@ -637,20 +648,26 @@ void vm_object_terminate(
+ * using memory_object_terminate.
+ */
+
++ /* Copy attributes while object is locked. */
++ pager = object->pager;
++ pager_request = object->pager_request;
++ pager_name = object->pager_name;
++#if MACH_PAGEMAP
++ existence_info = object->existence_info;
++#endif /* MACH_PAGEMAP */
++
+ vm_object_unlock(object);
+
+- if (object->pager != IP_NULL) {
++ if (pager != IP_NULL) {
+ /* consumes our rights for pager, pager_request, pager_name */
+- memory_object_release(object->pager,
+- object->pager_request,
+- object->pager_name);
+- } else if (object->pager_name != IP_NULL) {
++ memory_object_release(pager, pager_request, pager_name);
++ } else if (pager_name != IP_NULL) {
+ /* consumes our right for pager_name */
+- ipc_port_dealloc_kernel(object->pager_name);
++ ipc_port_dealloc_kernel(pager_name);
+ }
+
+ #if MACH_PAGEMAP
+- vm_external_destroy(object->existence_info);
++ vm_external_destroy(existence_info);
+ #endif /* MACH_PAGEMAP */
+
+ /*
+@@ -1080,6 +1097,7 @@ kern_return_t vm_object_copy_slowly(
+ */
+
+ new_object = vm_object_allocate(size);
++ vm_object_lock(new_object);
+ new_offset = 0;
+
+ assert(size == trunc_page(size)); /* Will the loop terminate? */
+@@ -1171,6 +1189,7 @@ kern_return_t vm_object_copy_slowly(
+
+ case VM_FAULT_INTERRUPTED:
+ vm_page_free(new_page);
++ vm_object_unlock(new_object);
+ vm_object_deallocate(new_object);
+ vm_object_deallocate(src_object);
+ *_result_object = VM_OBJECT_NULL;
+@@ -1186,6 +1205,7 @@ kern_return_t vm_object_copy_slowly(
+ */
+
+ vm_page_free(new_page);
++ vm_object_unlock(new_object);
+ vm_object_deallocate(new_object);
+ vm_object_deallocate(src_object);
+ *_result_object = VM_OBJECT_NULL;
+@@ -1200,6 +1220,7 @@ kern_return_t vm_object_copy_slowly(
+
+ vm_object_deallocate(src_object);
+ *_result_object = new_object;
++ vm_object_unlock(new_object);
+ return KERN_SUCCESS;
+ }
+
+@@ -1474,14 +1495,11 @@ vm_object_t vm_object_copy_delayed(
+ * must be done carefully, to avoid deadlock.
+ */
+
+- /*
+- * Allocate a new copy object before locking, even
+- * though we may not need it later.
+- */
++ vm_object_lock(src_object);
+
+ new_copy = vm_object_allocate(src_object->size);
+
+- vm_object_lock(src_object);
++ vm_object_lock(new_copy);
+
+ /*
+ * See whether we can reuse the result of a previous
+@@ -1519,7 +1537,7 @@ vm_object_t vm_object_copy_delayed(
+ old_copy->ref_count++;
+ vm_object_unlock(old_copy);
+ vm_object_unlock(src_object);
+-
++ vm_object_unlock(new_copy);
+ vm_object_deallocate(new_copy);
+
+ return old_copy;
+@@ -1574,7 +1592,7 @@ vm_object_t vm_object_copy_delayed(
+ }
+
+ vm_object_unlock(src_object);
+-
++ vm_object_unlock(new_copy);
+ return new_copy;
+ }
+
+@@ -1711,6 +1729,8 @@ void vm_object_shadow(
+ if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
+ panic("vm_object_shadow: no object for shadowing");
+
++ vm_object_lock(result);
++
+ /*
+ * The new object shadows the source object, adding
+ * a reference to it. Our caller changes his reference
+@@ -1733,6 +1753,7 @@ void vm_object_shadow(
+
+ *offset = 0;
+ *object = result;
++ vm_object_unlock(result);
+ }
+
+ /*
+@@ -2053,8 +2074,10 @@ restart:
+ object = (po == IKOT_PAGER) ? (vm_object_t) pager->ip_kobject
+ : VM_OBJECT_NULL;
+
+- if ((object != VM_OBJECT_NULL) && !must_init) {
++ if (object != VM_OBJECT_NULL)
+ vm_object_lock(object);
++
++ if ((object != VM_OBJECT_NULL) && !must_init) {
+ if (object->ref_count == 0) {
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+@@ -2062,10 +2085,9 @@ restart:
+ vm_object_cached_pages_update(-object->resident_page_count);
+ }
+ object->ref_count++;
+- vm_object_unlock(object);
+-
+ vm_stat.hits++;
+ }
++
+ assert((object == VM_OBJECT_NULL) || (object->ref_count > 0) ||
+ ((object->paging_in_progress != 0) && internal));
+
+@@ -2085,6 +2107,10 @@ restart:
+ return(object);
+
+ if (must_init) {
++ vm_size_t pager_size;
++ pager_request_t pager_request;
++ struct ipc_port *pager_name;
++
+ /*
+ * Copy the naked send right we were given.
+ */
+@@ -2112,6 +2138,11 @@ restart:
+ * Let the pager know we're using it.
+ */
+
++ /* Store attributes while we're holding the lock. */
++ pager_size = object->size;
++ pager_request = object->pager_request;
++ pager_name = object->pager_name;
++
+ if (internal) {
+ /* acquire a naked send right for the DMM */
+ ipc_port_t DMM = memory_manager_default_reference();
+@@ -2123,12 +2154,15 @@ restart:
+ /* default-pager objects are ready immediately */
+ object->pager_ready = TRUE;
+
++ /* Unlock object across call to memory manager. */
++ vm_object_unlock(object);
++
+ /* consumes the naked send right for DMM */
+ (void) memory_object_create(DMM,
+ pager,
+- object->size,
+- object->pager_request,
+- object->pager_name,
++ pager_size,
++ pager_request,
++ pager_name,
+ PAGE_SIZE);
+ } else {
+ /* the object is external and not temporary */
+@@ -2138,13 +2172,16 @@ restart:
+ /* user pager objects are not ready until marked so */
+ object->pager_ready = FALSE;
+
++ /* Unlock object across call to memory manager. */
++ vm_object_unlock(object);
++
+ (void) memory_object_init(pager,
+- object->pager_request,
+- object->pager_name,
++ pager_request,
++ pager_name,
+ PAGE_SIZE);
+-
+ }
+
++ /* Object was unlocked across call to memory manager. */
+ vm_object_lock(object);
+ object->pager_initialized = TRUE;
+
+@@ -2152,9 +2189,8 @@ restart:
+ object->pager_ready = TRUE;
+
+ vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
+- } else {
+- vm_object_lock(object);
+ }
++
+ /*
+ * [At this point, the object must be locked]
+ */
+diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
+index 51a6a0d..b676c7b 100644
+--- a/vm/vm_pageout.c
++++ b/vm/vm_pageout.c
+@@ -252,6 +252,8 @@ vm_pageout_setup(
+ vm_object_unlock(new_object);
+ }
+
++ vm_object_lock(old_object);
++
+ if (flush) {
+ /*
+ * Create a place-holder page where the old one was,
+@@ -262,7 +264,6 @@ vm_pageout_setup(
+ == VM_PAGE_NULL)
+ vm_page_more_fictitious();
+
+- vm_object_lock(old_object);
+ vm_page_lock_queues();
+ vm_page_remove(m);
+ vm_page_unlock_queues();
+@@ -281,8 +282,6 @@ vm_pageout_setup(
+ VM_EXTERNAL_STATE_EXISTS);
+ #endif /* MACH_PAGEMAP */
+
+- vm_object_unlock(old_object);
+-
+ vm_object_lock(new_object);
+
+ /*
+@@ -305,7 +304,6 @@ vm_pageout_setup(
+ */
+ vm_page_copy(m, new_m);
+
+- vm_object_lock(old_object);
+ m->dirty = FALSE;
+ pmap_clear_modify(m->phys_addr);
+
+@@ -328,8 +326,6 @@ vm_pageout_setup(
+ VM_EXTERNAL_STATE_EXISTS);
+ #endif /* MACH_PAGEMAP */
+
+- vm_object_unlock(old_object);
+-
+ vm_object_lock(new_object);
+
+ /*
+@@ -383,6 +379,7 @@ vm_pageout_setup(
+ */
+
+ vm_object_unlock(new_object);
++ vm_object_unlock(old_object);
+
+ /*
+ * Return the placeholder page to simplify cleanup.
+--
+2.1.4
+
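A recurring shape in the patch above (the obj struct added to vm_fault_page(), object_paging_offset in memory_object.c, the copies taken in vm_object_terminate() and vm_object_enter()) is: copy the fields an external call will need into locals while the object is still locked, unlock, then use only the locals. The standalone C sketch below shows that pattern with a pthread mutex standing in for the object lock; struct object, data_request() and the field names are illustrative, not gnumach's. The snapshot can of course go stale once the lock is dropped, which is presumably acceptable here because the values are only handed to the memory manager rather than written back.

/*
 * Standalone sketch (not gnumach code): snapshot the fields needed for an
 * upcall while the object is locked, then call out using the locals only.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct object {
    pthread_mutex_t lock;
    int             pager;          /* stands in for object->pager         */
    long            paging_offset;  /* stands in for object->paging_offset */
};

/* Stand-in for a call to an external memory manager; it may block, so the
 * object lock must not be held across it. */
static void data_request(int pager, long offset)
{
    printf("request to pager %d at offset %ld\n", pager, offset);
}

static void fault_one_page(struct object *o, long page_offset)
{
    struct { int pager; long paging_offset; } snap;

    pthread_mutex_lock(&o->lock);
    snap.pager = o->pager;                  /* read under the lock */
    snap.paging_offset = o->paging_offset;
    pthread_mutex_unlock(&o->lock);

    /* Lock dropped: only the snapshot is used from here on. */
    data_request(snap.pager, page_offset + snap.paging_offset);
}

int main(void)
{
    struct object o = { .pager = 7, .paging_offset = 4096 };

    pthread_mutex_init(&o.lock, NULL);
    fault_one_page(&o, 8192);
    pthread_mutex_destroy(&o.lock);
    return 0;
}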
diff --git a/debian/patches/fix-locking0005-ipc-fix-locking-issues.patch b/debian/patches/fix-locking0005-ipc-fix-locking-issues.patch
new file mode 100644
index 0000000..736e957
--- /dev/null
+++ b/debian/patches/fix-locking0005-ipc-fix-locking-issues.patch
@@ -0,0 +1,268 @@
+From 803f226e749138713b5c767b1a108544ce5e0baf Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 18 Aug 2015 11:32:15 +0200
+Subject: [PATCH gnumach 5/5] ipc: fix locking issues
+
+* ipc/ipc_port.h (struct ipc_port): Document locking exception.
+* ipc/ipc_port.c (ipc_port_destroy): Avoid accessing `port's fields
+without the lock.
+(ipc_port_alloc_special): Lock `port'.
+* ipc/mach_msg.c (mach_msg_trap): Avoid using
+`ipc_port_flag_protected_payload' on unlocked port.
+* ipc/ipc_kmsg.c (ipc_kmsg_copyout_header): Likewise.
+---
+ ipc/ipc_kmsg.c | 16 ++++++++++++----
+ ipc/ipc_port.c | 11 ++++++-----
+ ipc/ipc_port.h | 2 ++
+ ipc/mach_msg.c | 16 +++++++++++++---
+ 4 files changed, 33 insertions(+), 12 deletions(-)
+
+diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
+index 5076809..6c4b59a 100644
+--- a/ipc/ipc_kmsg.c
++++ b/ipc/ipc_kmsg.c
+@@ -1713,6 +1713,7 @@ ipc_kmsg_copyout_header(
+ mach_port_t dest_name;
+ ipc_port_t nsrequest;
+ unsigned long payload;
++ int have_payload;
+
+ /* receiving an asynchronous message */
+
+@@ -1732,6 +1733,7 @@ ipc_kmsg_copyout_header(
+ else
+ dest_name = MACH_PORT_NULL;
+ payload = dest->ip_protected_payload;
++ have_payload = ipc_port_flag_protected_payload(dest);
+
+ if ((--dest->ip_srights == 0) &&
+ ((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
+@@ -1745,7 +1747,7 @@ ipc_kmsg_copyout_header(
+ } else
+ ip_unlock(dest);
+
+- if (! ipc_port_flag_protected_payload(dest)) {
++ if (! have_payload) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+@@ -1766,6 +1768,7 @@ ipc_kmsg_copyout_header(
+ mach_port_t dest_name, reply_name;
+ ipc_port_t nsrequest;
+ unsigned long payload;
++ int have_payload;
+
+ /* receiving a request message */
+
+@@ -1837,6 +1840,7 @@ ipc_kmsg_copyout_header(
+ else
+ dest_name = MACH_PORT_NULL;
+ payload = dest->ip_protected_payload;
++ have_payload = ipc_port_flag_protected_payload(dest);
+
+ if ((--dest->ip_srights == 0) &&
+ ((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
+@@ -1850,7 +1854,7 @@ ipc_kmsg_copyout_header(
+ } else
+ ip_unlock(dest);
+
+- if (! ipc_port_flag_protected_payload(dest)) {
++ if (! have_payload) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND));
+@@ -1868,6 +1872,7 @@ ipc_kmsg_copyout_header(
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ mach_port_t dest_name;
+ unsigned long payload;
++ int have_payload;
+
+ /* receiving a reply message */
+
+@@ -1882,6 +1887,7 @@ ipc_kmsg_copyout_header(
+ assert(dest->ip_sorights > 0);
+
+ payload = dest->ip_protected_payload;
++ have_payload = ipc_port_flag_protected_payload(dest);
+
+ if (dest->ip_receiver == space) {
+ ip_release(dest);
+@@ -1895,7 +1901,7 @@ ipc_kmsg_copyout_header(
+ dest_name = MACH_PORT_NULL;
+ }
+
+- if (! ipc_port_flag_protected_payload(dest)) {
++ if (! have_payload) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+@@ -1922,6 +1928,7 @@ ipc_kmsg_copyout_header(
+ ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
+ mach_port_t dest_name, reply_name;
+ unsigned long payload;
++ int have_payload;
+
+ if (IP_VALID(reply)) {
+ ipc_port_t notify_port;
+@@ -2161,6 +2168,7 @@ ipc_kmsg_copyout_header(
+
+ copyout_dest:
+ payload = dest->ip_protected_payload;
++ have_payload = ipc_port_flag_protected_payload(dest);
+
+ if (ip_active(dest)) {
+ ipc_object_copyout_dest(space, (ipc_object_t) dest,
+@@ -2189,7 +2197,7 @@ ipc_kmsg_copyout_header(
+ if (IP_VALID(reply))
+ ipc_port_release(reply);
+
+- if (! ipc_port_flag_protected_payload(dest)) {
++ if (! have_payload) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ msg->msgh_local_port = dest_name;
+diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
+index 86a4ee2..4e4ad59 100644
+--- a/ipc/ipc_port.c
++++ b/ipc/ipc_port.c
+@@ -694,11 +694,13 @@ ipc_port_destroy(
+
+ port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
+ port->ip_timestamp = ipc_port_timestamp();
++
++ nsrequest = port->ip_nsrequest;
++ dnrequests = port->ip_dnrequests;
++
+ ip_unlock(port);
+
+ /* throw away no-senders request */
+-
+- nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL)
+ ipc_notify_send_once(nsrequest); /* consumes ref */
+
+@@ -725,8 +727,6 @@ ipc_port_destroy(
+ imq_unlock(mqueue);
+
+ /* generate dead-name notifications */
+-
+- dnrequests = port->ip_dnrequests;
+ if (dnrequests != IPR_NULL) {
+ ipc_table_size_t its = dnrequests->ipr_size;
+ ipc_table_elems_t size = its->its_size;
+@@ -1183,6 +1183,7 @@ ipc_port_alloc_special(ipc_space_t space)
+ return IP_NULL;
+
+ ip_lock_init(port);
++ ip_lock(port);
+ port->ip_references = 1;
+ port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
+
+@@ -1198,7 +1199,7 @@ ipc_port_alloc_special(ipc_space_t space)
+ */
+
+ ipc_port_init(port, space, (mach_port_t)port);
+-
++ ip_unlock(port);
+ return port;
+ }
+
+diff --git a/ipc/ipc_port.h b/ipc/ipc_port.h
+index ade6967..de43b42 100644
+--- a/ipc/ipc_port.h
++++ b/ipc/ipc_port.h
+@@ -82,6 +82,8 @@ struct ipc_port {
+ ipc_port_timestamp_t timestamp;
+ } data;
+
++ /* Note: For pagers (IKOT_PAGER), this field is protected by
++ vm_object_cache_lock. */
+ ipc_kobject_t ip_kobject;
+
+ mach_port_mscount_t ip_mscount;
+diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
+index fe0c43e..371d725 100644
+--- a/ipc/mach_msg.c
++++ b/ipc/mach_msg.c
+@@ -954,6 +954,7 @@ mach_msg_trap(
+ (ipc_port_t) kmsg->ikm_header.msgh_local_port;
+ mach_port_t dest_name, reply_name;
+ unsigned long payload;
++ int have_payload;
+
+ /* receiving a request message */
+
+@@ -1018,6 +1019,8 @@ mach_msg_trap(
+ else
+ dest_name = MACH_PORT_NULL;
+ payload = dest_port->ip_protected_payload;
++ have_payload =
++ ipc_port_flag_protected_payload(dest_port);
+
+ if ((--dest_port->ip_srights == 0) &&
+ (dest_port->ip_nsrequest != IP_NULL)) {
+@@ -1035,7 +1038,7 @@ mach_msg_trap(
+ } else
+ ip_unlock(dest_port);
+
+- if (! ipc_port_flag_protected_payload(dest_port)) {
++ if (! have_payload) {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND);
+@@ -1059,6 +1062,7 @@ mach_msg_trap(
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ mach_port_t dest_name;
+ unsigned long payload;
++ int have_payload;
+
+ /* receiving a reply message */
+
+@@ -1071,6 +1075,8 @@ mach_msg_trap(
+ assert(dest_port->ip_sorights > 0);
+
+ payload = dest_port->ip_protected_payload;
++ have_payload =
++ ipc_port_flag_protected_payload(dest_port);
+
+ if (dest_port->ip_receiver == space) {
+ ip_release(dest_port);
+@@ -1084,7 +1090,7 @@ mach_msg_trap(
+ dest_name = MACH_PORT_NULL;
+ }
+
+- if (! ipc_port_flag_protected_payload(dest_port)) {
++ if (! have_payload) {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+@@ -1104,6 +1110,7 @@ mach_msg_trap(
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ mach_port_t dest_name;
+ unsigned long payload;
++ int have_payload;
+
+ /* receiving a complex reply message */
+
+@@ -1116,6 +1123,9 @@ mach_msg_trap(
+ assert(dest_port->ip_sorights > 0);
+
+ payload = dest_port->ip_protected_payload;
++ have_payload =
++ ipc_port_flag_protected_payload(dest_port);
++
+
+ if (dest_port->ip_receiver == space) {
+ ip_release(dest_port);
+@@ -1129,7 +1139,7 @@ mach_msg_trap(
+ dest_name = MACH_PORT_NULL;
+ }
+
+- if (! ipc_port_flag_protected_payload(dest_port)) {
++ if (! have_payload) {
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS_COMPLEX
+ | MACH_MSGH_BITS(
+--
+2.1.4
+
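The ipc half applies the same idea to ports: ip_protected_payload and the protected-payload flag are latched into locals (payload, have_payload) while the port is still locked, and the code after ip_unlock() branches on those locals instead of touching the unlocked port again. A minimal standalone sketch of that pattern follows, with a pthread mutex standing in for the port lock and illustrative names (struct port, copyout_header).

/*
 * Standalone sketch (not gnumach code): latch a flag under the port lock
 * and branch on the local copy after unlocking.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct port {
    pthread_mutex_t lock;
    int             has_protected_payload;  /* illustrative flag       */
    unsigned long   protected_payload;      /* ~ ip_protected_payload  */
};

static void copyout_header(struct port *p)
{
    unsigned long payload;
    int have_payload;

    pthread_mutex_lock(&p->lock);
    payload = p->protected_payload;
    have_payload = p->has_protected_payload;  /* latched under the lock */
    pthread_mutex_unlock(&p->lock);

    if (have_payload)                         /* local copy, not p->... */
        printf("deliver protected payload %lu\n", payload);
    else
        printf("deliver the port name instead\n");
}

int main(void)
{
    struct port p = { .has_protected_payload = 1, .protected_payload = 42 };

    pthread_mutex_init(&p.lock, NULL);
    copyout_header(&p);
    pthread_mutex_destroy(&p.lock);
    return 0;
}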
diff --git a/debian/patches/series b/debian/patches/series
index 029e46a..3ed624a 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -13,3 +13,8 @@ sysenter0001-yyy-sysenter-prototype.patch
vm-cache-policy0001-VM-cache-policy-change.patch
vm-cache-policy0002-vm-keep-track-of-clean-pages.patch
vm-cache-policy0003-vm-evict-clean-pages-first.patch
+fix-locking0001-kern-really-zero-out-unused-simple-lock-info-entries.patch
+fix-locking0002-i386-enable-assertions.patch
+fix-locking0003-kern-bootstrap-tune-locking.patch
+fix-locking0004-vm-fix-locking-issues.patch
+fix-locking0005-ipc-fix-locking-issues.patch