author     Justus Winter <4winter@informatik.uni-hamburg.de>   2015-08-16 02:18:47 +0200
committer  Justus Winter <4winter@informatik.uni-hamburg.de>   2015-08-16 02:19:38 +0200
commit     80ffa7de3147b27c82667a7e2224439f92106f92 (patch)
tree       f4af5eae93780a12306c9026cfd381d8d7b33a4b /vm
parent     03955afa0a3cc6e9dece39ec631ce3744b1b08ba (diff)

codify locking contracts    (pmm-2015-08-16)
Diffstat (limited to 'vm')
-rw-r--r--  vm/vm_fault.c      3
-rw-r--r--  vm/vm_map.c        4
-rw-r--r--  vm/vm_map.h        3
-rw-r--r--  vm/vm_object.c    21
-rw-r--r--  vm/vm_object.h     3
-rw-r--r--  vm/vm_page.h       1
-rw-r--r--  vm/vm_pageout.c    1
-rw-r--r--  vm/vm_resident.c  19
8 files changed, 53 insertions, 2 deletions
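
The whole patch applies one pattern: locking contracts that were previously
stated only in comments ("the map should be locked") become assertions at
function entry, built on have_lock/have_read_lock/have_write_lock predicates
that ask the lock whether it is currently held (this diff is limited to vm/,
so those underlying predicates are presumably added to the lock package
elsewhere in the series). A minimal user-space sketch of such a predicate,
assuming an exclusive lock that records its owner; the names wlock,
have_wlock and needs_wlock are illustrative, not the Mach API:

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct wlock {
        pthread_mutex_t mutex;
        pthread_t owner;        /* valid only while held is true */
        bool held;
    };

    void wlock_acquire(struct wlock *l)
    {
        pthread_mutex_lock(&l->mutex);
        l->owner = pthread_self();
        l->held = true;
    }

    void wlock_release(struct wlock *l)
    {
        l->held = false;
        pthread_mutex_unlock(&l->mutex);
    }

    /* The contract predicate: does the calling thread hold l?
     * Reading held/owner without the mutex is fine for this use:
     * a caller asserting the contract either holds the lock (so the
     * fields are stable) or has the very bug the assert should catch. */
    bool have_wlock(struct wlock *l)
    {
        return l->held && pthread_equal(l->owner, pthread_self());
    }

    /* A function whose contract requires l to be held on entry. */
    void needs_wlock(struct wlock *l)
    {
        assert(have_wlock(l));
        /* ... work on the protected state ... */
    }

    int main(void)
    {
        struct wlock l;

        pthread_mutex_init(&l.mutex, NULL);
        l.held = false;

        wlock_acquire(&l);
        needs_wlock(&l);        /* contract satisfied: passes */
        wlock_release(&l);

        pthread_mutex_destroy(&l.mutex);
        return 0;
    }

Unlike the comment it replaces, the assertion fails loudly, and at the exact
call site, the moment a caller violates the contract.
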
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 96f53fb..1024da6 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -129,6 +129,7 @@ vm_fault_cleanup(
vm_object_t object,
vm_page_t top_page)
{
+ assert(have_vm_object_lock(object));
vm_object_paging_end(object);
vm_object_unlock(object);
@@ -245,6 +246,8 @@ vm_fault_return_t vm_fault_page(
goto after_thread_block;
}
+ assert(have_vm_object_lock(first_object));
+
vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
vm_stat.faults++; /* needs lock XXX */
current_task()->faults++;
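
Both vm_fault.c assertions guard lock hand-offs: vm_fault_cleanup asserts the
very lock it is about to release, and the vm_fault_page assertion sits just
past the point where a thread resumed after blocking rejoins the main path,
so first entry and resumption are checked alike. User-space code can get the
release-side check for free from a POSIX error-checking mutex, which reports
EPERM instead of invoking undefined behaviour when unlocked by a non-owner; a
compilable demonstration using only standard pthreads:

    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>

    int main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t m;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &attr);

        /* Unlocking a lock we do not hold: detected, not undefined. */
        assert(pthread_mutex_unlock(&m) == EPERM);

        pthread_mutex_lock(&m);
        assert(pthread_mutex_unlock(&m) == 0);  /* held: succeeds */

        pthread_mutex_destroy(&m);
        pthread_mutexattr_destroy(&attr);
        return 0;
    }
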
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 1ebccac..57d931b 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -516,6 +516,8 @@ kern_return_t vm_map_find_entry(
vm_offset_t start;
vm_offset_t end;
+ assert(have_vm_map_lock(map));
+
/*
* Look for the first possible address;
* if there's already something at this
@@ -586,6 +588,7 @@ kern_return_t vm_map_find_entry(
*
* the map should be locked.
*/
+ assert(have_vm_map_lock(map));
*address = start;
@@ -903,6 +906,7 @@ kern_return_t vm_map_enter(
*
* the map should be locked.
*/
+ assert(have_vm_map_lock(map));
/*
* See whether we can avoid creating a new entry (and object) by
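
Both vm_map.c hunks land directly beneath a pre-existing "the map should be
locked" comment: the prose stays as documentation, and the assertion beneath
it enforces it, so the two cannot drift apart. The same pairing as a
compilable sketch, with illustrative names standing in for the map and its
lock:

    #include <assert.h>
    #include <stdbool.h>

    struct map { bool locked; };    /* stand-in for struct vm_map */

    bool have_map_lock(const struct map *map)
    {
        return map->locked;
    }

    int map_find_entry(struct map *map)
    {
        /*
         * At this point,
         *      the map should be locked.
         */
        assert(have_map_lock(map)); /* the comment, now enforced */
        return 0;
    }
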
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 9b31f90..c9f828a 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -358,6 +358,9 @@ MACRO_END
lock_set_recursive(&(map)->lock)
#define vm_map_lock_clear_recursive(map) \
lock_clear_recursive(&(map)->lock)
+#define have_vm_map_lock(map) have_lock(&(map)->lock)
+#define have_vm_map_read_lock(map) have_read_lock(&(map)->lock)
+#define have_vm_map_write_lock(map) have_write_lock(&(map)->lock)
/*
* Exported procedures that operate on vm_map_t.
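
The map lock is a reader-writer lock, hence three predicates: held at all,
held for read, held for write. Read-side entry points can then assert a
weaker contract than mutating ones. A compilable sketch of that split, with a
toy lock standing in for the Mach lock package (the real have_lock family
asks the lock itself):

    #include <assert.h>
    #include <stdbool.h>

    struct rwlock {
        int readers;
        bool writer;
    };

    bool have_read_lock(const struct rwlock *l)  { return l->readers > 0; }
    bool have_write_lock(const struct rwlock *l) { return l->writer; }
    bool have_lock(const struct rwlock *l)
    {
        return have_read_lock(l) || have_write_lock(l);
    }

    struct map { struct rwlock lock; };

    /* Read-only traversal: any holder, reader or writer, may enter. */
    void map_lookup(struct map *map)
    {
        assert(have_lock(&map->lock));
    }

    /* Mutation: only an exclusive holder may enter. */
    void map_modify(struct map *map)
    {
        assert(have_write_lock(&map->lock));
    }
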
diff --git a/vm/vm_object.c b/vm/vm_object.c
index c650b8c..6622c99 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -189,6 +189,8 @@ struct lock vm_object_cache_lock_data;
lock_try_write(&vm_object_cache_lock_data)
#define vm_object_cache_unlock() \
lock_write_done(&vm_object_cache_lock_data)
+#define have_vm_object_cache_lock() \
+ have_write_lock(&vm_object_cache_lock_data)
/*
* Number of physical pages referenced by cached objects.
@@ -514,7 +516,6 @@ void vm_object_deallocate(
* Destroy the object; the cache lock will
* be released in the process.
*/
-
vm_object_terminate(object);
/*
@@ -546,6 +547,9 @@ void vm_object_terminate(
vm_page_t p;
vm_object_t shadow_object;
+ assert(have_vm_object_lock(object));
+ assert(have_vm_object_cache_lock());
+
/*
* Make sure the object isn't already being terminated
*/
@@ -737,6 +741,8 @@ void vm_object_abort_activity(
{
vm_page_t p, old_p;
+ assert(have_vm_object_lock(object));
+
/*
* Abort all activity that would be waiting
* for a result on this memory object.
@@ -780,6 +786,7 @@ void vm_object_abort_activity(
object->pager_ready = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+ assert(have_vm_object_lock(object));
}
/*
@@ -878,6 +885,8 @@ void vm_object_deactivate_pages(
{
vm_page_t p;
+ assert(have_vm_object_lock(object));
+
struct rdxtree_iter iter;
rdxtree_for_each(&object->memt, &iter, p) {
vm_page_lock_queues();
@@ -1336,6 +1345,8 @@ kern_return_t vm_object_copy_call(
vm_object_t new_object;
vm_page_t p;
+ assert(have_vm_object_lock(src_object));
+
/*
* Create a memory object port to be associated
* with this new vm_object.
@@ -2171,6 +2182,7 @@ restart:
/*
* [At this point, the object must be locked]
*/
+ assert(have_vm_object_lock(object));
/*
* Wait for the work above to be done by the first
@@ -2300,6 +2312,8 @@ void vm_object_remove(
{
ipc_port_t port;
+ assert(have_vm_object_cache_lock());
+
if ((port = object->pager) != IP_NULL) {
if (ip_kotype(port) == IKOT_PAGER)
ipc_kobject_set(port, IKO_NULL,
@@ -2355,6 +2369,8 @@ void vm_object_collapse(
vm_page_t p, pp;
ipc_port_t old_name_port;
+ assert(have_vm_object_lock(object));
+
if (!vm_object_collapse_allowed)
return;
@@ -2723,6 +2739,9 @@ void vm_object_page_remove(
vm_offset_t end)
{
vm_page_t p;
+
+ assert(have_vm_object_lock(object));
+
struct rdxtree_iter iter;
rdxtree_for_each(&object->memt, &iter, p) {
if ((start <= p->offset) && (p->offset < end)) {
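
vm_object_terminate now asserts two locks at once, the object's own lock and
the global object-cache lock, matching the caller's note in
vm_object_deallocate that destroying the object releases the cache lock in
the process; vm_object_remove, by contrast, needs only the cache lock.
Keeping the assertions separate means a failure report names the exact lock a
buggy caller forgot. Sketch, with globals standing in for the two lock states
(illustrative names):

    #include <assert.h>
    #include <stdbool.h>

    struct object { bool locked; };

    bool cache_locked;  /* stand-in for the object-cache lock state */

    void object_terminate(struct object *o)
    {
        /* Two independent preconditions, two assertions: the
         * failing one pinpoints the missing lock. */
        assert(o->locked);
        assert(cache_locked);
        /* ... teardown that eventually drops both locks ... */
    }
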
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 142404a..5805a58 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -378,8 +378,9 @@ MACRO_END
#define vm_object_sleep(event, object, interruptible) \
thread_sleep_lock((event_t)(event), &(object)->Lock, \
(interruptible))
-#define vm_object_lock_taken(object) lock_taken(&(object)->Lock)
#endif /* VM_OBJECT_DEBUG */
+#define have_vm_object_lock(object) \
+ (object == NULL || have_write_lock(&(object)->Lock))
/*
* Page cache accounting.
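
The debug-only vm_object_lock_taken gives way to a predicate that is always
available and deliberately treats a null object as locked, presumably so that
call sites such as assert(have_vm_object_lock(mem->object)) in vm_resident.c
stay valid for pages not currently tabled in any object. A self-contained
illustration of that vacuous-truth case (illustrative names):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct object { bool write_locked; };

    /* Null-tolerant, mirroring have_vm_object_lock(): a page with
     * no object has no lock to hold, so the contract is vacuously
     * satisfied. */
    bool have_object_lock(const struct object *o)
    {
        return o == NULL || o->write_locked;
    }

    int main(void)
    {
        struct object obj = { .write_locked = true };

        assert(have_object_lock(&obj)); /* held: passes */
        assert(have_object_lock(NULL)); /* no object: also passes */
        return 0;
    }
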
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 81ab3df..3fe6572 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -432,6 +432,7 @@ extern void vm_page_unwire(vm_page_t);
#define vm_page_lock_queues() lock_write(&vm_page_queue_lock)
#define vm_page_unlock_queues() lock_write_done(&vm_page_queue_lock)
+#define have_vm_page_queue_lock() have_write_lock(&vm_page_queue_lock)
#define VM_PAGE_QUEUES_REMOVE(mem) \
MACRO_BEGIN \
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 51a6a0d..5e8630f 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -427,6 +427,7 @@ vm_pageout_page(
boolean_t precious_clean;
assert(m->busy);
+ assert(have_vm_object_lock(m->object));
/*
* Cleaning but not flushing a clean precious page is a
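
Here the new lock assertion composes with the pre-existing state assertion: a
page handed to vm_pageout_page must be marked busy and be covered by its
object's lock. A compact sketch of such composed preconditions (illustrative
names):

    #include <assert.h>
    #include <stdbool.h>

    struct page {
        bool busy;          /* page is exclusively in use      */
        bool object_locked; /* stand-in for the lock predicate */
    };

    void pageout_page(struct page *m)
    {
        assert(m->busy);          /* state precondition   */
        assert(m->object_locked); /* locking precondition */
        /* ... clean or flush the page ... */
    }
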
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 8b10e00..5959989 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -274,6 +274,7 @@ void vm_page_insert(
vm_object_t object,
vm_offset_t offset)
{
+ assert(have_vm_object_lock(object));
VM_PAGE_CHECK(mem);
if (mem->tabled)
@@ -337,6 +338,7 @@ void vm_page_replace(
struct vm_page *old;
void **slot;
+ assert(have_vm_object_lock(object));
VM_PAGE_CHECK(mem);
if (mem->tabled)
@@ -393,6 +395,7 @@ void vm_page_replace(
void vm_page_remove(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
assert(mem->tabled);
VM_PAGE_CHECK(mem);
@@ -425,6 +428,7 @@ vm_page_t vm_page_lookup(
vm_object_t object,
vm_offset_t offset)
{
+ assert(have_vm_object_lock(object));
return rdxtree_lookup(&object->memt, offset_key(offset));
}
@@ -441,6 +445,8 @@ void vm_page_rename(
vm_object_t new_object,
vm_offset_t new_offset)
{
+ assert(have_vm_object_lock(new_object));
+
/*
* Changes to mem->object require the page lock because
* the pageout daemon uses that lock to get the object.
@@ -534,6 +540,8 @@ boolean_t vm_page_convert(
struct vm_page *real_m, *fict_m, *old;
void **slot;
+ assert(have_vm_object_lock((*mp)->object));
+
fict_m = *mp;
assert(fict_m->fictitious);
@@ -731,6 +739,8 @@ vm_page_t vm_page_alloc(
{
vm_page_t mem;
+ assert(have_vm_object_lock(object));
+
mem = vm_page_grab(!object->internal);
if (mem == VM_PAGE_NULL)
return VM_PAGE_NULL;
@@ -753,6 +763,9 @@ vm_page_t vm_page_alloc(
void vm_page_free(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
+
if (mem->tabled)
vm_page_remove(mem);
VM_PAGE_QUEUES_REMOVE(mem);
@@ -793,6 +806,8 @@ void vm_page_free(
void vm_page_wire(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
@@ -814,6 +829,8 @@ void vm_page_wire(
void vm_page_unwire(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(mem);
if (--mem->wire_count == 0) {
@@ -837,6 +854,7 @@ void vm_page_unwire(
void vm_page_deactivate(
vm_page_t m)
{
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(m);
/*
@@ -871,6 +889,7 @@ void vm_page_deactivate(
void vm_page_activate(
vm_page_t m)
{
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(m);
if (m->inactive) {
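
Taken together, the vm_resident.c assertions, using the new
have_vm_page_queue_lock from vm_page.h alongside have_vm_object_lock, spell
out a two-lock discipline that was previously implicit: operations that touch
both an object's page table and the global queues (vm_page_free,
vm_page_wire, vm_page_unwire) must hold both locks; pure queue transitions
(vm_page_deactivate, vm_page_activate) need only the queue lock; and pure
object-table operations (vm_page_insert, vm_page_lookup, vm_page_alloc) need
only the object lock. A compact sketch of that discipline (illustrative
names):

    #include <assert.h>
    #include <stdbool.h>

    bool object_locked;   /* per-object lock, sketch only   */
    bool queues_locked;   /* global page-queue lock, sketch */

    /* Moves a page out of its object and onto the free list:
     * touches both structures, needs both locks. */
    void page_free(void)
    {
        assert(object_locked && queues_locked);
    }

    /* Pure queue transition: the object's page table is untouched. */
    void page_deactivate(void)
    {
        assert(queues_locked);
    }

    /* Pure object-table operation: the queues are untouched. */
    void page_lookup(void)
    {
        assert(object_locked);
    }
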