author     Justus Winter <4winter@informatik.uni-hamburg.de>  2015-08-16 12:54:41 +0200
committer  Justus Winter <4winter@informatik.uni-hamburg.de>  2015-09-14 14:45:05 +0200
commit     22778b589af94261c2d8d0665220770d0b56d1a8 (patch)
tree       97bdb1c058f030d30dfe0d566630010572a4fb35 /vm
parent     81747ac6d3e846d1955edaa94dee9065e541b7d8 (diff)
vm: fix locking issues
Avoid accessing fields of `vm_object' objects without holding the
object's lock.  These problems were found using a Coccinelle code
transformation that instrumented all field accesses with a runtime
check, and by manual inspection.

* vm/memory_object.c (memory_object_data_supply): Avoid accessing
fields without the lock.
* vm/vm_fault.c (vm_fault_page): Likewise.
* vm/vm_map.c (vm_map_submap): Properly lock `object'.
(vm_map_copy_overwrite): Avoid accessing fields without the lock.
(vm_map_copyin): Lock `src_object'.
* vm/vm_object.c (_vm_object_setup): Likewise.
(vm_object_allocate): Likewise.
(vm_object_terminate): Avoid accessing fields without the lock.
(vm_object_copy_slowly): Lock `new_object'.
(vm_object_copy_delayed): Lock `src_object' earlier, lock `new_copy'.
(vm_object_shadow): Lock `result'.
(vm_object_enter): Properly lock `object'.  Avoid accessing fields
without the lock.
* vm/vm_pageout.c (vm_pageout_setup): Properly lock `old_object'.
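The patch applies one recurring idiom: fields of a `vm_object' are read
only while the object's lock is held, so any values needed for a call
to the memory manager are first saved into locals under the lock, and
the lock is dropped across the call.  A minimal sketch of the idiom,
assuming a hypothetical caller; the function name and its particular
use of memory_object_data_unlock are illustrative, not code from the
patch:

	/* Sketch: snapshot object fields under the lock, then drop
	   the lock across the RPC so the unlocked object is never
	   touched directly.  */
	static kern_return_t
	data_unlock_sketch(
		vm_object_t	object,
		vm_offset_t	offset,
		vm_prot_t	access_required)
	{
		struct ipc_port	*pager;
		pager_request_t	pager_request;
		vm_offset_t	paging_offset;

		vm_object_lock(object);
		/* Copy the fields needed for the request.  */
		pager = object->pager;
		pager_request = object->pager_request;
		paging_offset = object->paging_offset;
		vm_object_unlock(object);

		/* Unlocked: use only the saved copies.  */
		return memory_object_data_unlock(pager, pager_request,
						 offset + paging_offset,
						 PAGE_SIZE,
						 access_required);
	}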
Diffstat (limited to 'vm')
-rw-r--r--  vm/memory_object.c  17
-rw-r--r--  vm/vm_fault.c       39
-rw-r--r--  vm/vm_map.c         55
-rw-r--r--  vm/vm_object.c      88
-rw-r--r--  vm/vm_pageout.c      9
5 files changed, 145 insertions, 63 deletions
diff --git a/vm/memory_object.c b/vm/memory_object.c
index 097ed23..0a07429 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -101,6 +101,7 @@ kern_return_t memory_object_data_supply(
vm_page_t *page_list;
boolean_t was_absent;
vm_map_copy_t orig_copy = data_copy;
+ pager_request_t pager_request;
/*
* Look for bogus arguments
@@ -270,6 +271,7 @@ retry_lookup:
/*
* Send reply if one was requested.
*/
+ pager_request = object->pager_request;
vm_object_paging_end(object);
vm_object_unlock(object);
@@ -279,7 +281,7 @@ retry_lookup:
if (IP_VALID(reply_to)) {
memory_object_supply_completed(
reply_to, reply_to_type,
- object->pager_request,
+ pager_request,
original_offset,
original_length,
result,
@@ -788,7 +790,9 @@ MACRO_END
continue;
case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
- case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:;
+ vm_offset_t object_paging_offset;
+
/*
* The clean and return cases are similar.
*
@@ -811,6 +815,7 @@ MACRO_END
PAGEOUT_PAGES;
}
+ object_paging_offset = object->paging_offset;
vm_object_unlock(object);
/*
@@ -821,8 +826,7 @@ MACRO_END
if (new_object == VM_OBJECT_NULL) {
new_object = vm_object_allocate(original_size);
new_offset = 0;
- paging_offset = m->offset +
- object->paging_offset;
+ paging_offset = m->offset + object_paging_offset;
pageout_action = page_lock_result;
}
@@ -831,7 +835,7 @@ MACRO_END
* new object.
*/
m = vm_pageout_setup(m,
- m->offset + object->paging_offset,
+ m->offset + object_paging_offset,
new_object,
new_offset,
should_flush);
@@ -859,11 +863,12 @@ MACRO_END
}
if (IP_VALID(reply_to)) {
+ pager_request_t pager_request = object->pager_request;
vm_object_unlock(object);
/* consumes our naked send-once/send right for reply_to */
(void) memory_object_lock_completed(reply_to, reply_to_type,
- object->pager_request, original_offset, original_size);
+ pager_request, original_offset, original_size);
vm_object_lock(object);
}
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 46779f6..101ebce 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -229,6 +229,17 @@ vm_fault_return_t vm_fault_page(
boolean_t look_for_page;
vm_prot_t access_required;
+ /* We need to unlock an object before making requests to a
+ memory manager. We use this structure to temporarily store
+ the object attributes needed for the request, to avoid
+ accessing the object while it is unlocked. */
+ struct
+ {
+ struct ipc_port * pager;
+ pager_request_t pager_request;
+ vm_offset_t paging_offset;
+ } obj;
+
if (resume) {
vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
@@ -510,11 +521,16 @@ vm_fault_return_t vm_fault_page(
new_unlock_request = m->unlock_request =
(access_required | m->unlock_request);
+ obj.pager = object->pager;
+ obj.pager_request =
+ object->pager_request;
+ obj.paging_offset =
+ object->paging_offset;
vm_object_unlock(object);
if ((rc = memory_object_data_unlock(
- object->pager,
- object->pager_request,
- offset + object->paging_offset,
+ obj.pager,
+ obj.pager_request,
+ offset + obj.paging_offset,
PAGE_SIZE,
new_unlock_request))
!= KERN_SUCCESS) {
@@ -633,6 +649,11 @@ vm_fault_return_t vm_fault_page(
m->absent = TRUE;
object->absent_count++;
+ /* Save attributes for the request. */
+ obj.pager = object->pager;
+ obj.pager_request = object->pager_request;
+ obj.paging_offset = object->paging_offset;
+
/*
* We have a busy page, so we can
* release the object lock.
@@ -647,16 +668,16 @@ vm_fault_return_t vm_fault_page(
vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
current_task()->pageins++;
- if ((rc = memory_object_data_request(object->pager,
- object->pager_request,
- m->offset + object->paging_offset,
+ if ((rc = memory_object_data_request(obj.pager,
+ obj.pager_request,
+ m->offset + obj.paging_offset,
PAGE_SIZE, access_required)) != KERN_SUCCESS) {
if (rc != MACH_SEND_INTERRUPTED)
printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
"memory_object_data_request",
- object->pager,
- object->pager_request,
- m->offset + object->paging_offset,
+ obj.pager,
+ obj.pager_request,
+ m->offset + obj.paging_offset,
PAGE_SIZE, access_required, rc);
/*
* Don't want to leave a busy page around,
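The vm_map.c hunks below apply the same rule to compound conditions:
the object is locked before its fields are tested (or a field is
snapshotted into a local such as `temporary'), and the lock is released
on every branch.  A sketch of the shape, with placeholder fields
standing in for the real conditions:

	/* Sketch only; the tested fields are placeholders.  */
	boolean_t	usable = FALSE;

	if (object != VM_OBJECT_NULL) {
		vm_object_lock(object);
		usable = (object->resident_page_count == 0)
			&& !object->pager_created;
		vm_object_unlock(object);
	}
	if (usable) {
		/* ... proceed without touching object fields ... */
	}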
diff --git a/vm/vm_map.c b/vm/vm_map.c
index ae3ce21..9098dfd 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -1182,16 +1182,20 @@ kern_return_t vm_map_submap(
if ((entry->vme_start == start) && (entry->vme_end == end) &&
(!entry->is_sub_map) &&
- ((object = entry->object.vm_object) == vm_submap_object) &&
- (object->resident_page_count == 0) &&
- (object->copy == VM_OBJECT_NULL) &&
- (object->shadow == VM_OBJECT_NULL) &&
- (!object->pager_created)) {
- entry->object.vm_object = VM_OBJECT_NULL;
- vm_object_deallocate(object);
- entry->is_sub_map = TRUE;
- vm_map_reference(entry->object.sub_map = submap);
- result = KERN_SUCCESS;
+ ((object = entry->object.vm_object) == vm_submap_object)) {
+ vm_object_lock(object);
+ if ((object->resident_page_count == 0) &&
+ (object->copy == VM_OBJECT_NULL) &&
+ (object->shadow == VM_OBJECT_NULL) &&
+ (!object->pager_created)) {
+ vm_object_unlock(object);
+ entry->object.vm_object = VM_OBJECT_NULL;
+ vm_object_deallocate(object);
+ entry->is_sub_map = TRUE;
+ vm_map_reference(entry->object.sub_map = submap);
+ result = KERN_SUCCESS;
+ } else
+ vm_object_unlock(object);
}
vm_map_unlock(map);
@@ -2122,6 +2126,7 @@ start_pass_1:
for (entry = tmp_entry;;) {
vm_size_t sub_size = (entry->vme_end - entry->vme_start);
vm_map_entry_t next = entry->vme_next;
+ vm_object_t object;
if ( ! (entry->protection & VM_PROT_WRITE)) {
vm_map_unlock(dst_map);
@@ -2157,10 +2162,13 @@ start_pass_1:
/*
* Check for permanent objects in the destination.
*/
-
- if ((entry->object.vm_object != VM_OBJECT_NULL) &&
- !entry->object.vm_object->temporary)
- contains_permanent_objects = TRUE;
+ object = entry->object.vm_object;
+ if ((object != VM_OBJECT_NULL)
+ && ! contains_permanent_objects) {
+ vm_object_lock(object);
+ contains_permanent_objects = ! object->temporary;
+ vm_object_unlock(object);
+ }
size -= sub_size;
entry = next;
@@ -2220,6 +2228,7 @@ start_pass_1:
vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
vm_size_t copy_size = (copy_entry->vme_end - copy_entry->vme_start);
vm_object_t object;
+ int temporary;
entry = tmp_entry;
size = (entry->vme_end - entry->vme_start);
@@ -2275,8 +2284,15 @@ start_pass_1:
*/
object = entry->object.vm_object;
+ temporary = 0;
+ if (object != VM_OBJECT_NULL) {
+ vm_object_lock(object);
+ temporary = object->temporary;
+ vm_object_unlock(object);
+ }
+
if (!entry->is_shared &&
- ((object == VM_OBJECT_NULL) || object->temporary)) {
+ ((object == VM_OBJECT_NULL) || temporary)) {
vm_object_t old_object = entry->object.vm_object;
vm_offset_t old_offset = entry->offset;
@@ -3219,11 +3235,15 @@ kern_return_t vm_map_copyin(
/*
* Attempt non-blocking copy-on-write optimizations.
*/
-
+ if (src_object)
+ vm_object_lock(src_object);
if (src_destroy &&
(src_object == VM_OBJECT_NULL ||
(src_object->temporary && !src_object->use_shared_copy)))
{
+ if (src_object)
+ vm_object_unlock(src_object);
+
/*
* If we are destroying the source, and the object
* is temporary, and not shared writable,
@@ -3243,6 +3263,9 @@ kern_return_t vm_map_copyin(
goto CopySuccessful;
}
+ if (src_object)
+ vm_object_unlock(src_object);
+
if (!was_wired &&
vm_object_copy_temporary(
&new_entry->object.vm_object,
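In vm_object.c below, newly allocated objects are locked while their
fields are set up, even though no other thread can yet reach them, so
that the locking rule holds unconditionally; vm_object_copy_delayed
also takes `src_object''s lock before allocating `new_copy', so both
locks are then held together.  A sketch of the allocation pattern (the
initialization body is illustrative):

	vm_object_t new_object;

	new_object = vm_object_allocate(size);
	vm_object_lock(new_object);
	/* ... initialize or populate new_object's fields ... */
	vm_object_unlock(new_object);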
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 133181f..36dbd8b 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -217,9 +217,11 @@ static void _vm_object_setup(
vm_size_t size)
{
*object = vm_object_template;
- queue_init(&object->memq);
vm_object_lock_init(object);
+ vm_object_lock(object);
+ queue_init(&object->memq);
object->size = size;
+ vm_object_unlock(object);
}
vm_object_t _vm_object_allocate(
@@ -244,8 +246,11 @@ vm_object_t vm_object_allocate(
port = ipc_port_alloc_kernel();
if (port == IP_NULL)
panic("vm_object_allocate");
+
+ vm_object_lock(object);
object->pager_name = port;
ipc_kobject_set(port, (ipc_kobject_t) object, IKOT_PAGING_NAME);
+ vm_object_unlock(object);
return object;
}
@@ -540,6 +545,12 @@ void vm_object_terminate(
{
vm_page_t p;
vm_object_t shadow_object;
+ struct ipc_port *pager;
+ pager_request_t pager_request;
+ struct ipc_port *pager_name;
+#if MACH_PAGEMAP
+ vm_external_t existence_info;
+#endif /* MACH_PAGEMAP */
/*
* Make sure the object isn't already being terminated
@@ -637,20 +648,26 @@ void vm_object_terminate(
* using memory_object_terminate.
*/
+ /* Copy attributes while object is locked. */
+ pager = object->pager;
+ pager_request = object->pager_request;
+ pager_name = object->pager_name;
+#if MACH_PAGEMAP
+ existence_info = object->existence_info;
+#endif /* MACH_PAGEMAP */
+
vm_object_unlock(object);
- if (object->pager != IP_NULL) {
+ if (pager != IP_NULL) {
/* consumes our rights for pager, pager_request, pager_name */
- memory_object_release(object->pager,
- object->pager_request,
- object->pager_name);
- } else if (object->pager_name != IP_NULL) {
+ memory_object_release(pager, pager_request, pager_name);
+ } else if (pager_name != IP_NULL) {
/* consumes our right for pager_name */
- ipc_port_dealloc_kernel(object->pager_name);
+ ipc_port_dealloc_kernel(pager_name);
}
#if MACH_PAGEMAP
- vm_external_destroy(object->existence_info);
+ vm_external_destroy(existence_info);
#endif /* MACH_PAGEMAP */
/*
@@ -1080,6 +1097,7 @@ kern_return_t vm_object_copy_slowly(
*/
new_object = vm_object_allocate(size);
+ vm_object_lock(new_object);
new_offset = 0;
assert(size == trunc_page(size)); /* Will the loop terminate? */
@@ -1171,6 +1189,7 @@ kern_return_t vm_object_copy_slowly(
case VM_FAULT_INTERRUPTED:
vm_page_free(new_page);
+ vm_object_unlock(new_object);
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
@@ -1186,6 +1205,7 @@ kern_return_t vm_object_copy_slowly(
*/
vm_page_free(new_page);
+ vm_object_unlock(new_object);
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
@@ -1200,6 +1220,7 @@ kern_return_t vm_object_copy_slowly(
vm_object_deallocate(src_object);
*_result_object = new_object;
+ vm_object_unlock(new_object);
return KERN_SUCCESS;
}
@@ -1474,14 +1495,11 @@ vm_object_t vm_object_copy_delayed(
* must be done carefully, to avoid deadlock.
*/
- /*
- * Allocate a new copy object before locking, even
- * though we may not need it later.
- */
+ vm_object_lock(src_object);
new_copy = vm_object_allocate(src_object->size);
- vm_object_lock(src_object);
+ vm_object_lock(new_copy);
/*
* See whether we can reuse the result of a previous
@@ -1519,7 +1537,7 @@ vm_object_t vm_object_copy_delayed(
old_copy->ref_count++;
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
-
+ vm_object_unlock(new_copy);
vm_object_deallocate(new_copy);
return old_copy;
@@ -1574,7 +1592,7 @@ vm_object_t vm_object_copy_delayed(
}
vm_object_unlock(src_object);
-
+ vm_object_unlock(new_copy);
return new_copy;
}
@@ -1711,6 +1729,8 @@ void vm_object_shadow(
if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
panic("vm_object_shadow: no object for shadowing");
+ vm_object_lock(result);
+
/*
* The new object shadows the source object, adding
* a reference to it. Our caller changes his reference
@@ -1733,6 +1753,7 @@ void vm_object_shadow(
*offset = 0;
*object = result;
+ vm_object_unlock(result);
}
/*
@@ -2053,8 +2074,10 @@ restart:
object = (po == IKOT_PAGER) ? (vm_object_t) pager->ip_kobject
: VM_OBJECT_NULL;
- if ((object != VM_OBJECT_NULL) && !must_init) {
+ if (object != VM_OBJECT_NULL)
vm_object_lock(object);
+
+ if ((object != VM_OBJECT_NULL) && !must_init) {
if (object->ref_count == 0) {
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
@@ -2062,10 +2085,9 @@ restart:
vm_object_cached_pages_update(-object->resident_page_count);
}
object->ref_count++;
- vm_object_unlock(object);
-
vm_stat.hits++;
}
+
assert((object == VM_OBJECT_NULL) || (object->ref_count > 0) ||
((object->paging_in_progress != 0) && internal));
@@ -2085,6 +2107,10 @@ restart:
return(object);
if (must_init) {
+ vm_size_t pager_size;
+ pager_request_t pager_request;
+ struct ipc_port *pager_name;
+
/*
* Copy the naked send right we were given.
*/
@@ -2112,6 +2138,11 @@ restart:
* Let the pager know we're using it.
*/
+ /* Store attributes while we're holding the lock. */
+ pager_size = object->size;
+ pager_request = object->pager_request;
+ pager_name = object->pager_name;
+
if (internal) {
/* acquire a naked send right for the DMM */
ipc_port_t DMM = memory_manager_default_reference();
@@ -2123,12 +2154,15 @@ restart:
/* default-pager objects are ready immediately */
object->pager_ready = TRUE;
+ /* Unlock object across call to memory manager. */
+ vm_object_unlock(object);
+
/* consumes the naked send right for DMM */
(void) memory_object_create(DMM,
pager,
- object->size,
- object->pager_request,
- object->pager_name,
+ pager_size,
+ pager_request,
+ pager_name,
PAGE_SIZE);
} else {
/* the object is external and not temporary */
@@ -2138,13 +2172,16 @@ restart:
/* user pager objects are not ready until marked so */
object->pager_ready = FALSE;
+ /* Unlock object across call to memory manager. */
+ vm_object_unlock(object);
+
(void) memory_object_init(pager,
- object->pager_request,
- object->pager_name,
+ pager_request,
+ pager_name,
PAGE_SIZE);
-
}
+ /* Object was unlocked across call to memory manager. */
vm_object_lock(object);
object->pager_initialized = TRUE;
@@ -2152,9 +2189,8 @@ restart:
object->pager_ready = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
- } else {
- vm_object_lock(object);
}
+
/*
* [At this point, the object must be locked]
*/
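The vm_pageout.c change below widens the scope of `old_object''s lock
in vm_pageout_setup: instead of taking and dropping it separately in
the flush and copy branches, it is taken once before the branches and
held until after `new_object' has been handled, briefly holding both
locks.  A sketch of the resulting shape (the branch bodies are
illustrative):

	vm_object_lock(old_object);
	if (flush) {
		/* ... remove the page from old_object ... */
	} else {
		/* ... copy the page's contents ... */
	}
	vm_object_lock(new_object);	/* both locks held */
	/* ... enter the page into new_object ... */
	vm_object_unlock(new_object);
	vm_object_unlock(old_object);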
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 51a6a0d..b676c7b 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -252,6 +252,8 @@ vm_pageout_setup(
vm_object_unlock(new_object);
}
+ vm_object_lock(old_object);
+
if (flush) {
/*
* Create a place-holder page where the old one was,
@@ -262,7 +264,6 @@ vm_pageout_setup(
== VM_PAGE_NULL)
vm_page_more_fictitious();
- vm_object_lock(old_object);
vm_page_lock_queues();
vm_page_remove(m);
vm_page_unlock_queues();
@@ -281,8 +282,6 @@ vm_pageout_setup(
VM_EXTERNAL_STATE_EXISTS);
#endif /* MACH_PAGEMAP */
- vm_object_unlock(old_object);
-
vm_object_lock(new_object);
/*
@@ -305,7 +304,6 @@ vm_pageout_setup(
*/
vm_page_copy(m, new_m);
- vm_object_lock(old_object);
m->dirty = FALSE;
pmap_clear_modify(m->phys_addr);
@@ -328,8 +326,6 @@ vm_pageout_setup(
VM_EXTERNAL_STATE_EXISTS);
#endif /* MACH_PAGEMAP */
- vm_object_unlock(old_object);
-
vm_object_lock(new_object);
/*
@@ -383,6 +379,7 @@ vm_pageout_setup(
*/
vm_object_unlock(new_object);
+ vm_object_unlock(old_object);
/*
* Return the placeholder page to simplify cleanup.