Diffstat (limited to 'vm/vm_object.c')
 vm/vm_object.c | 88 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 62 insertions(+), 26 deletions(-)
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 133181f..36dbd8b 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -217,9 +217,11 @@ static void _vm_object_setup(
vm_size_t size)
{
*object = vm_object_template;
- queue_init(&object->memq);
vm_object_lock_init(object);
+ vm_object_lock(object);
+ queue_init(&object->memq);
object->size = size;
+ vm_object_unlock(object);
}
vm_object_t _vm_object_allocate(
@@ -244,8 +246,11 @@ vm_object_t vm_object_allocate(
port = ipc_port_alloc_kernel();
if (port == IP_NULL)
panic("vm_object_allocate");
+
+ vm_object_lock(object);
object->pager_name = port;
ipc_kobject_set(port, (ipc_kobject_t) object, IKOT_PAGING_NAME);
+ vm_object_unlock(object);
return object;
}
@@ -540,6 +545,12 @@ void vm_object_terminate(
{
vm_page_t p;
vm_object_t shadow_object;
+ struct ipc_port *pager;
+ pager_request_t pager_request;
+ struct ipc_port *pager_name;
+#if MACH_PAGEMAP
+ vm_external_t existence_info;
+#endif /* MACH_PAGEMAP */
/*
* Make sure the object isn't already being terminated
@@ -637,20 +648,26 @@ void vm_object_terminate(
* using memory_object_terminate.
*/
+ /* Copy attributes while object is locked. */
+ pager = object->pager;
+ pager_request = object->pager_request;
+ pager_name = object->pager_name;
+#if MACH_PAGEMAP
+ existence_info = object->existence_info;
+#endif /* MACH_PAGEMAP */
+
vm_object_unlock(object);
- if (object->pager != IP_NULL) {
+ if (pager != IP_NULL) {
/* consumes our rights for pager, pager_request, pager_name */
- memory_object_release(object->pager,
- object->pager_request,
- object->pager_name);
- } else if (object->pager_name != IP_NULL) {
+ memory_object_release(pager, pager_request, pager_name);
+ } else if (pager_name != IP_NULL) {
/* consumes our right for pager_name */
- ipc_port_dealloc_kernel(object->pager_name);
+ ipc_port_dealloc_kernel(pager_name);
}
#if MACH_PAGEMAP
- vm_external_destroy(object->existence_info);
+ vm_external_destroy(existence_info);
#endif /* MACH_PAGEMAP */
/*
@@ -1080,6 +1097,7 @@ kern_return_t vm_object_copy_slowly(
*/
new_object = vm_object_allocate(size);
+ vm_object_lock(new_object);
new_offset = 0;
assert(size == trunc_page(size)); /* Will the loop terminate? */
@@ -1171,6 +1189,7 @@ kern_return_t vm_object_copy_slowly(
case VM_FAULT_INTERRUPTED:
vm_page_free(new_page);
+ vm_object_unlock(new_object);
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
@@ -1186,6 +1205,7 @@ kern_return_t vm_object_copy_slowly(
*/
vm_page_free(new_page);
+ vm_object_unlock(new_object);
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
@@ -1200,6 +1220,7 @@ kern_return_t vm_object_copy_slowly(
vm_object_deallocate(src_object);
*_result_object = new_object;
+ vm_object_unlock(new_object);
return KERN_SUCCESS;
}
@@ -1474,14 +1495,11 @@ vm_object_t vm_object_copy_delayed(
* must be done carefully, to avoid deadlock.
*/
- /*
- * Allocate a new copy object before locking, even
- * though we may not need it later.
- */
+ vm_object_lock(src_object);
new_copy = vm_object_allocate(src_object->size);
- vm_object_lock(src_object);
+ vm_object_lock(new_copy);
/*
* See whether we can reuse the result of a previous
@@ -1519,7 +1537,7 @@ vm_object_t vm_object_copy_delayed(
old_copy->ref_count++;
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
-
+ vm_object_unlock(new_copy);
vm_object_deallocate(new_copy);
return old_copy;
@@ -1574,7 +1592,7 @@ vm_object_t vm_object_copy_delayed(
}
vm_object_unlock(src_object);
-
+ vm_object_unlock(new_copy);
return new_copy;
}
@@ -1711,6 +1729,8 @@ void vm_object_shadow(
if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
panic("vm_object_shadow: no object for shadowing");
+ vm_object_lock(result);
+
/*
* The new object shadows the source object, adding
* a reference to it. Our caller changes his reference
@@ -1733,6 +1753,7 @@ void vm_object_shadow(
*offset = 0;
*object = result;
+ vm_object_unlock(result);
}
/*
@@ -2053,8 +2074,10 @@ restart:
object = (po == IKOT_PAGER) ? (vm_object_t) pager->ip_kobject
: VM_OBJECT_NULL;
- if ((object != VM_OBJECT_NULL) && !must_init) {
+ if (object != VM_OBJECT_NULL)
vm_object_lock(object);
+
+ if ((object != VM_OBJECT_NULL) && !must_init) {
if (object->ref_count == 0) {
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
@@ -2062,10 +2085,9 @@ restart:
vm_object_cached_pages_update(-object->resident_page_count);
}
object->ref_count++;
- vm_object_unlock(object);
-
vm_stat.hits++;
}
+
assert((object == VM_OBJECT_NULL) || (object->ref_count > 0) ||
((object->paging_in_progress != 0) && internal));
@@ -2085,6 +2107,10 @@ restart:
return(object);
if (must_init) {
+ vm_size_t pager_size;
+ pager_request_t pager_request;
+ struct ipc_port *pager_name;
+
/*
* Copy the naked send right we were given.
*/
@@ -2112,6 +2138,11 @@ restart:
* Let the pager know we're using it.
*/
+ /* Store attributes while we're holding the lock. */
+ pager_size = object->size;
+ pager_request = object->pager_request;
+ pager_name = object->pager_name;
+
if (internal) {
/* acquire a naked send right for the DMM */
ipc_port_t DMM = memory_manager_default_reference();
@@ -2123,12 +2154,15 @@ restart:
/* default-pager objects are ready immediately */
object->pager_ready = TRUE;
+ /* Unlock object across call to memory manager. */
+ vm_object_unlock(object);
+
/* consumes the naked send right for DMM */
(void) memory_object_create(DMM,
pager,
- object->size,
- object->pager_request,
- object->pager_name,
+ pager_size,
+ pager_request,
+ pager_name,
PAGE_SIZE);
} else {
/* the object is external and not temporary */
@@ -2138,13 +2172,16 @@ restart:
/* user pager objects are not ready until marked so */
object->pager_ready = FALSE;
+ /* Unlock object across call to memory manager. */
+ vm_object_unlock(object);
+
(void) memory_object_init(pager,
- object->pager_request,
- object->pager_name,
+ pager_request,
+ pager_name,
PAGE_SIZE);
-
}
+ /* Object was unlocked across call to memory manager. */
vm_object_lock(object);
object->pager_initialized = TRUE;
@@ -2152,9 +2189,8 @@ restart:
object->pager_ready = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
- } else {
- vm_object_lock(object);
}
+
/*
* [At this point, the object must be locked]
*/
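
The pattern throughout this change is the same: take the object lock before touching object fields, snapshot any fields needed for a call into the memory manager while the lock is still held, and drop the lock across that call so the external pager can never deadlock against it. Below is a minimal, self-contained sketch of that copy-under-lock / call-unlocked pattern, using POSIX mutexes instead of Mach's vm_object_lock(); every name in it (struct obj, pager_release, obj_terminate) is hypothetical and only stands in for the kernel code in the hunks above.

/*
 * Sketch only, not Mach kernel code: snapshot the fields needed for an
 * external call while the object lock is held, then release the lock
 * before calling out, mirroring the vm_object_terminate() hunk above.
 */
#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int pager;		/* stands in for the pager port */
	int pager_name;		/* stands in for the pager_name port */
};

/* Stand-in for memory_object_release(); assume it may block or call back. */
static void pager_release(int pager, int pager_name)
{
	printf("releasing pager %d (name %d)\n", pager, pager_name);
}

static void obj_terminate(struct obj *o)
{
	int pager, pager_name;

	pthread_mutex_lock(&o->lock);
	/* Copy attributes while the object is locked. */
	pager = o->pager;
	pager_name = o->pager_name;
	pthread_mutex_unlock(&o->lock);

	/* Call out to the "memory manager" only after the lock is dropped. */
	if (pager != 0)
		pager_release(pager, pager_name);
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 42, 7 };

	obj_terminate(&o);
	return 0;
}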