diff options
Diffstat (limited to 'vm')
-rw-r--r--  vm/vm_fault.c    |  3
-rw-r--r--  vm/vm_map.c      |  4
-rw-r--r--  vm/vm_map.h      |  3
-rw-r--r--  vm/vm_object.c   | 19
-rw-r--r--  vm/vm_object.h   |  3
-rw-r--r--  vm/vm_page.h     |  1
-rw-r--r--  vm/vm_pageout.c  |  1
-rw-r--r--  vm/vm_resident.c | 20
8 files changed, 52 insertions, 2 deletions
diff --git a/vm/vm_fault.c b/vm/vm_fault.c index 101ebce..40d1cbc 100644 --- a/vm/vm_fault.c +++ b/vm/vm_fault.c @@ -127,6 +127,7 @@ vm_fault_cleanup( vm_object_t object, vm_page_t top_page) { + assert(have_vm_object_lock(object)); vm_object_paging_end(object); vm_object_unlock(object); @@ -254,6 +255,8 @@ vm_fault_return_t vm_fault_page( goto after_thread_block; } + assert(have_vm_object_lock(first_object)); + vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY); vm_stat.faults++; /* needs lock XXX */ current_task()->faults++; diff --git a/vm/vm_map.c b/vm/vm_map.c index 9098dfd..5099283 100644 --- a/vm/vm_map.c +++ b/vm/vm_map.c @@ -531,6 +531,8 @@ kern_return_t vm_map_find_entry( vm_offset_t start; vm_offset_t end; + assert(have_vm_map_lock(map)); + /* * Look for the first possible address; * if there's already something at this @@ -601,6 +603,7 @@ kern_return_t vm_map_find_entry( * * the map should be locked. */ + assert(have_vm_map_lock(map)); *address = start; @@ -918,6 +921,7 @@ kern_return_t vm_map_enter( * * the map should be locked. */ + assert(have_vm_map_lock(map)); /* * See whether we can avoid creating a new entry (and object) by diff --git a/vm/vm_map.h b/vm/vm_map.h index 9b31f90..c9f828a 100644 --- a/vm/vm_map.h +++ b/vm/vm_map.h @@ -358,6 +358,9 @@ MACRO_END lock_set_recursive(&(map)->lock) #define vm_map_lock_clear_recursive(map) \ lock_clear_recursive(&(map)->lock) +#define have_vm_map_lock(map) have_lock(&(map)->lock) +#define have_vm_map_read_lock(map) have_read_lock(&(map)->lock) +#define have_vm_map_write_lock(map) have_write_lock(&(map)->lock) /* * Exported procedures that operate on vm_map_t. 
diff --git a/vm/vm_object.c b/vm/vm_object.c index 4b31482..836f5ef 100644 --- a/vm/vm_object.c +++ b/vm/vm_object.c @@ -188,6 +188,8 @@ struct lock vm_object_cache_lock_data; lock_try_write(&vm_object_cache_lock_data) #define vm_object_cache_unlock() \ lock_write_done(&vm_object_cache_lock_data) +#define have_vm_object_cache_lock() \ + have_write_lock(&vm_object_cache_lock_data) /* * Number of physical pages referenced by cached objects. @@ -516,7 +518,6 @@ void vm_object_deallocate( * Destroy the object; the cache lock will * be released in the process. */ - vm_object_terminate(object); /* @@ -554,6 +555,9 @@ void vm_object_terminate( vm_external_t existence_info; #endif /* MACH_PAGEMAP */ + assert(have_vm_object_lock(object)); + assert(have_vm_object_cache_lock()); + /* * Make sure the object isn't already being terminated */ @@ -754,6 +758,8 @@ void vm_object_abort_activity( vm_page_t p; vm_page_t next; + assert(have_vm_object_lock(object)); + /* * Abort all activity that would be waiting * for a result on this memory object. @@ -791,6 +797,7 @@ void vm_object_abort_activity( object->pager_ready = TRUE; vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY); + assert(have_vm_object_lock(object)); } /* @@ -889,6 +896,7 @@ void vm_object_deactivate_pages( { vm_page_t p; + assert(have_vm_object_lock(object)); queue_iterate(&object->memq, p, vm_page_t, listq) { vm_page_lock_queues(); if (!p->busy) @@ -1348,6 +1356,8 @@ kern_return_t vm_object_copy_call( vm_object_t new_object; vm_page_t p; + assert(have_vm_object_lock(src_object)); + /* * Create a memory object port to be associated * with this new vm_object. 
@@ -2196,6 +2206,7 @@ restart: /* * [At this point, the object must be locked] */ + assert(have_vm_object_lock(object)); /* * Wait for the work above to be done by the first @@ -2325,6 +2336,8 @@ void vm_object_remove( { ipc_port_t port; + assert(have_vm_object_cache_lock()); + if ((port = object->pager) != IP_NULL) { if (ip_kotype(port) == IKOT_PAGER) ipc_kobject_set(port, IKO_NULL, @@ -2380,6 +2393,8 @@ void vm_object_collapse( vm_page_t p, pp; ipc_port_t old_name_port; + assert(have_vm_object_lock(object)); + if (!vm_object_collapse_allowed) return; @@ -2729,6 +2744,8 @@ void vm_object_page_remove( { vm_page_t p, next; + assert(have_vm_object_lock(object)); + /* * One and two page removals are most popular. * The factor of 16 here is somewhat arbitrary. diff --git a/vm/vm_object.h b/vm/vm_object.h index 3c9055f..01ce4ad 100644 --- a/vm/vm_object.h +++ b/vm/vm_object.h @@ -376,8 +376,9 @@ MACRO_END #define vm_object_sleep(event, object, interruptible) \ thread_sleep_lock((event_t)(event), &(object)->Lock, \ (interruptible)) -#define vm_object_lock_taken(object) lock_taken(&(object)->Lock) #endif /* VM_OBJECT_DEBUG */ +#define have_vm_object_lock(object) \ + (object == NULL || have_write_lock(&(object)->Lock)) /* * Page cache accounting. 
diff --git a/vm/vm_page.h b/vm/vm_page.h index 90599a1..dd571e2 100644 --- a/vm/vm_page.h +++ b/vm/vm_page.h @@ -293,6 +293,7 @@ extern unsigned int vm_page_info( #define vm_page_lock_queues() lock_write(&vm_page_queue_lock) #define vm_page_unlock_queues() lock_write_done(&vm_page_queue_lock) +#define have_vm_page_queue_lock() have_write_lock(&vm_page_queue_lock) #define VM_PAGE_QUEUES_REMOVE(mem) \ MACRO_BEGIN \ diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c index b676c7b..29cfca7 100644 --- a/vm/vm_pageout.c +++ b/vm/vm_pageout.c @@ -424,6 +424,7 @@ vm_pageout_page( boolean_t precious_clean; assert(m->busy); + assert(have_vm_object_lock(m->object)); /* * Cleaning but not flushing a clean precious page is a diff --git a/vm/vm_resident.c b/vm/vm_resident.c index 427c8f5..4e1eed5 100644 --- a/vm/vm_resident.c +++ b/vm/vm_resident.c @@ -489,6 +489,7 @@ void vm_page_insert( { vm_page_bucket_t *bucket; + assert(have_vm_object_lock(object)); VM_PAGE_CHECK(mem); if (mem->tabled) @@ -561,6 +562,7 @@ void vm_page_replace( { vm_page_bucket_t *bucket; + assert(have_vm_object_lock(object)); VM_PAGE_CHECK(mem); if (mem->tabled) @@ -651,6 +653,7 @@ void vm_page_remove( vm_page_bucket_t *bucket; vm_page_t this; + assert(have_vm_object_lock(mem->object)); assert(mem->tabled); VM_PAGE_CHECK(mem); @@ -710,6 +713,8 @@ vm_page_t vm_page_lookup( vm_page_t mem; vm_page_bucket_t *bucket; + assert(have_vm_object_lock(object)); + /* * Search the hash table for this object/offset pair */ @@ -739,6 +744,8 @@ void vm_page_rename( vm_object_t new_object, vm_offset_t new_offset) { + assert(have_vm_object_lock(new_object)); + /* * Changes to mem->object require the page lock because * the pageout daemon uses that lock to get the object. 
@@ -844,6 +851,8 @@ boolean_t vm_page_convert( { vm_page_t real_m; + assert(have_vm_object_lock(m->object)); + real_m = vm_page_grab(external); if (real_m == VM_PAGE_NULL) return FALSE; @@ -1259,6 +1268,8 @@ vm_page_t vm_page_alloc( { vm_page_t mem; + assert(have_vm_object_lock(object)); + mem = vm_page_grab(!object->internal); if (mem == VM_PAGE_NULL) return VM_PAGE_NULL; @@ -1281,6 +1292,9 @@ vm_page_t vm_page_alloc( void vm_page_free( vm_page_t mem) { + assert(have_vm_object_lock(mem->object)); + assert(have_vm_page_queue_lock()); + if (mem->free) panic("vm_page_free"); @@ -1332,6 +1346,8 @@ void vm_page_free( void vm_page_wire( vm_page_t mem) { + assert(have_vm_object_lock(mem->object)); + assert(have_vm_page_queue_lock()); VM_PAGE_CHECK(mem); if (mem->wire_count == 0) { @@ -1353,6 +1369,8 @@ void vm_page_wire( void vm_page_unwire( vm_page_t mem) { + assert(have_vm_object_lock(mem->object)); + assert(have_vm_page_queue_lock()); VM_PAGE_CHECK(mem); if (--mem->wire_count == 0) { @@ -1376,6 +1394,7 @@ void vm_page_unwire( void vm_page_deactivate( vm_page_t m) { + assert(have_vm_page_queue_lock()); VM_PAGE_CHECK(m); /* @@ -1410,6 +1429,7 @@ void vm_page_deactivate( void vm_page_activate( vm_page_t m) { + assert(have_vm_page_queue_lock()); VM_PAGE_CHECK(m); if (m->inactive) { |