summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJustus Winter <4winter@informatik.uni-hamburg.de>2015-08-16 02:18:47 +0200
committerJustus Winter <4winter@informatik.uni-hamburg.de>2015-08-16 02:19:38 +0200
commit80ffa7de3147b27c82667a7e2224439f92106f92 (patch)
treef4af5eae93780a12306c9026cfd381d8d7b33a4b
parent03955afa0a3cc6e9dece39ec631ce3744b1b08ba (diff)
codify locking contracts (tag: pmm-2015-08-16)
-rw-r--r--ipc/ipc_object.h1
-rw-r--r--ipc/ipc_port.c25
-rw-r--r--ipc/ipc_port.h1
-rw-r--r--ipc/ipc_right.c2
-rw-r--r--ipc/ipc_space.h12
-rw-r--r--vm/vm_fault.c3
-rw-r--r--vm/vm_map.c4
-rw-r--r--vm/vm_map.h3
-rw-r--r--vm/vm_object.c21
-rw-r--r--vm/vm_object.h3
-rw-r--r--vm/vm_page.h1
-rw-r--r--vm/vm_pageout.c1
-rw-r--r--vm/vm_resident.c19
13 files changed, 92 insertions, 4 deletions
diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
index 8504a23..e43c7d2 100644
--- a/ipc/ipc_object.h
+++ b/ipc/ipc_object.h
@@ -89,6 +89,7 @@ extern struct kmem_cache ipc_object_caches[IOT_NUMBER];
#define io_lock(io) lock_write(&(io)->io_lock_data)
#define io_lock_try(io) lock_try_write(&(io)->io_lock_data)
#define io_unlock(io) lock_write_done(&(io)->io_lock_data)
+#define have_io_lock(io) have_write_lock(&(io)->io_lock_data)
#define io_check_unlock(io) \
MACRO_BEGIN \
diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
index 86a4ee2..3aa6ce6 100644
--- a/ipc/ipc_port.c
+++ b/ipc/ipc_port.c
@@ -103,6 +103,7 @@ ipc_port_dnrequest(
ipc_port_request_t ipr, table;
ipc_port_request_index_t index;
+ assert(have_ip_lock(port));
assert(ip_active(port));
assert(name != MACH_PORT_NULL);
assert(soright != IP_NULL);
@@ -147,6 +148,7 @@ ipc_port_dngrow(ipc_port_t port)
ipc_table_size_t its;
ipc_port_request_t otable, ntable;
+ assert(have_ip_lock(port));
assert(ip_active(port));
otable = port->ip_dnrequests;
@@ -240,6 +242,7 @@ ipc_port_dncancel(
ipc_port_request_t ipr, table;
ipc_port_t dnrequest;
+ assert(have_ip_lock(port));
assert(ip_active(port));
assert(name != MACH_PORT_NULL);
assert(index != 0);
@@ -280,6 +283,7 @@ ipc_port_pdrequest(
{
ipc_port_t previous;
+ assert(have_ip_lock(port));
assert(ip_active(port));
previous = port->ip_pdrequest;
@@ -311,6 +315,7 @@ ipc_port_nsrequest(
ipc_port_t previous;
mach_port_mscount_t mscount;
+ assert(have_ip_lock(port));
assert(ip_active(port));
previous = port->ip_nsrequest;
@@ -344,6 +349,7 @@ ipc_port_set_qlimit(
ipc_port_t port,
mach_port_msgcount_t qlimit)
{
+ assert(have_ip_lock(port));
assert(ip_active(port));
/* wake up senders allowed by the new qlimit */
@@ -383,6 +389,9 @@ ipc_port_set_qlimit(
ipc_mqueue_t
ipc_port_lock_mqueue(ipc_port_t port)
{
+ assert(have_ip_lock(port));
+ assert(ip_active(port));
+
if (port->ip_pset != IPS_NULL) {
ipc_pset_t pset = port->ip_pset;
@@ -417,6 +426,9 @@ ipc_port_set_seqno(
{
ipc_mqueue_t mqueue;
+ assert(have_ip_lock(port));
+ assert(ip_active(port));
+
mqueue = ipc_port_lock_mqueue(port);
port->ip_seqno = seqno;
imq_unlock(mqueue);
@@ -435,6 +447,9 @@ ipc_port_set_protected_payload(ipc_port_t port, unsigned long payload)
{
ipc_mqueue_t mqueue;
+ assert(have_ip_lock(port));
+ assert(ip_active(port));
+
mqueue = ipc_port_lock_mqueue(port);
port->ip_protected_payload = payload;
ipc_port_flag_protected_payload_set(port);
@@ -454,6 +469,9 @@ ipc_port_clear_protected_payload(ipc_port_t port)
{
ipc_mqueue_t mqueue;
+ assert(have_ip_lock(port));
+ assert(ip_active(port));
+
mqueue = ipc_port_lock_mqueue(port);
ipc_port_flag_protected_payload_clear(port);
imq_unlock(mqueue);
@@ -474,6 +492,7 @@ ipc_port_clear_receiver(
{
ipc_pset_t pset;
+ assert(have_ip_lock(port));
assert(ip_active(port));
pset = port->ip_pset;
@@ -566,7 +585,7 @@ ipc_port_alloc(
if (kr != KERN_SUCCESS)
return kr;
- /* port is locked */
+ assert(have_ip_lock(port)); /* port is locked */
ipc_port_init(port, space, name);
@@ -604,7 +623,7 @@ ipc_port_alloc_name(
name, (ipc_object_t *) &port);
if (kr != KERN_SUCCESS)
return kr;
- /* port is locked */
+ assert(have_ip_lock(port)); /* port is locked */
ipc_port_init(port, space, name);
@@ -636,6 +655,7 @@ ipc_port_destroy(
ipc_thread_t sender;
ipc_port_request_t dnrequests;
+ assert(have_ip_lock(port));
assert(ip_active(port));
/* port->ip_receiver_name is garbage */
/* port->ip_receiver/port->ip_destination is garbage */
@@ -916,6 +936,7 @@ ipc_port_lookup_notify(
ipc_port_t port;
ipc_entry_t entry;
+ assert(have_is_lock(space));
assert(space->is_active);
entry = ipc_entry_lookup(space, name);
diff --git a/ipc/ipc_port.h b/ipc/ipc_port.h
index ade6967..7390950 100644
--- a/ipc/ipc_port.h
+++ b/ipc/ipc_port.h
@@ -119,6 +119,7 @@ struct ipc_port {
#define ip_lock(port) io_lock(&(port)->ip_object)
#define ip_lock_try(port) io_lock_try(&(port)->ip_object)
#define ip_unlock(port) io_unlock(&(port)->ip_object)
+#define have_ip_lock(port) have_io_lock(&(port)->ip_object)
#define ip_check_unlock(port) io_check_unlock(&(port)->ip_object)
#define ip_reference(port) io_reference(&(port)->ip_object)
#define ip_release(port) io_release(&(port)->ip_object)
diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
index 773b3b1..2e73cc3 100644
--- a/ipc/ipc_right.c
+++ b/ipc/ipc_right.c
@@ -114,6 +114,7 @@ ipc_right_reverse(
/* would switch on io_otype to handle multiple types of object */
+ assert(have_is_lock(space));
assert(space->is_active);
assert(io_otype(object) == IOT_PORT);
@@ -311,6 +312,7 @@ ipc_right_dncancel(
{
ipc_port_t dnrequest;
+ assert(have_is_write_lock(space));
assert(ip_active(port));
assert(port == (ipc_port_t) entry->ie_object);
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
index a2aac40..fcc36b0 100644
--- a/ipc/ipc_space.h
+++ b/ipc/ipc_space.h
@@ -118,6 +118,10 @@ MACRO_END
#define is_write_to_read_lock(is) lock_write_to_read(&(is)->is_lock_data)
+#define have_is_lock(is) have_lock(&(is)->is_lock_data)
+#define have_is_read_lock(is) have_read_lock(&(is)->is_lock_data)
+#define have_is_write_lock(is) have_write_lock(&(is)->is_lock_data)
+
extern void ipc_space_reference(struct ipc_space *space);
extern void ipc_space_release(struct ipc_space *space);
@@ -147,6 +151,8 @@ ipc_entry_lookup(
ipc_entry_t entry;
assert(space->is_active);
+ assert(have_is_lock(space));
+
entry = rdxtree_lookup(&space->is_map, (rdxtree_key_t) name);
if (entry != IE_NULL
&& IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
@@ -177,6 +183,7 @@ ipc_entry_get(
ipc_entry_t free_entry;
assert(space->is_active);
+ assert(have_is_write_lock(space));
/* Get entry from the free list. */
free_entry = space->is_free_list;
@@ -233,6 +240,7 @@ ipc_entry_dealloc(
ipc_entry_t entry)
{
assert(space->is_active);
+ assert(have_is_write_lock(space));
assert(entry->ie_object == IO_NULL);
assert(entry->ie_request == 0);
@@ -267,6 +275,7 @@ ipc_reverse_insert(ipc_space_t space,
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
+ assert(have_is_write_lock(space));
return (kern_return_t) rdxtree_insert(&space->is_reverse_map,
KEY(obj), entry);
}
@@ -279,6 +288,7 @@ ipc_reverse_remove(ipc_space_t space,
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
+ assert(have_is_write_lock(space));
return rdxtree_remove(&space->is_reverse_map, KEY(obj));
}
@@ -288,6 +298,7 @@ static inline void
ipc_reverse_remove_all(ipc_space_t space)
{
assert(space != IS_NULL);
+ assert(have_is_write_lock(space));
rdxtree_remove_all(&space->is_reverse_map);
assert(space->is_reverse_map.height == 0);
assert(space->is_reverse_map.root == NULL);
@@ -302,6 +313,7 @@ ipc_reverse_lookup(ipc_space_t space,
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
+ assert(have_is_lock(space));
return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
}
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 96f53fb..1024da6 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -129,6 +129,7 @@ vm_fault_cleanup(
vm_object_t object,
vm_page_t top_page)
{
+ assert(have_vm_object_lock(object));
vm_object_paging_end(object);
vm_object_unlock(object);
@@ -245,6 +246,8 @@ vm_fault_return_t vm_fault_page(
goto after_thread_block;
}
+ assert(have_vm_object_lock(first_object));
+
vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
vm_stat.faults++; /* needs lock XXX */
current_task()->faults++;
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 1ebccac..57d931b 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -516,6 +516,8 @@ kern_return_t vm_map_find_entry(
vm_offset_t start;
vm_offset_t end;
+ assert(have_vm_map_lock(map));
+
/*
* Look for the first possible address;
* if there's already something at this
@@ -586,6 +588,7 @@ kern_return_t vm_map_find_entry(
*
* the map should be locked.
*/
+ assert(have_vm_map_lock(map));
*address = start;
@@ -903,6 +906,7 @@ kern_return_t vm_map_enter(
*
* the map should be locked.
*/
+ assert(have_vm_map_lock(map));
/*
* See whether we can avoid creating a new entry (and object) by
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 9b31f90..c9f828a 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -358,6 +358,9 @@ MACRO_END
lock_set_recursive(&(map)->lock)
#define vm_map_lock_clear_recursive(map) \
lock_clear_recursive(&(map)->lock)
+#define have_vm_map_lock(map) have_lock(&(map)->lock)
+#define have_vm_map_read_lock(map) have_read_lock(&(map)->lock)
+#define have_vm_map_write_lock(map) have_write_lock(&(map)->lock)
/*
* Exported procedures that operate on vm_map_t.
diff --git a/vm/vm_object.c b/vm/vm_object.c
index c650b8c..6622c99 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -189,6 +189,8 @@ struct lock vm_object_cache_lock_data;
lock_try_write(&vm_object_cache_lock_data)
#define vm_object_cache_unlock() \
lock_write_done(&vm_object_cache_lock_data)
+#define have_vm_object_cache_lock() \
+ have_write_lock(&vm_object_cache_lock_data)
/*
* Number of physical pages referenced by cached objects.
@@ -514,7 +516,6 @@ void vm_object_deallocate(
* Destroy the object; the cache lock will
* be released in the process.
*/
-
vm_object_terminate(object);
/*
@@ -546,6 +547,9 @@ void vm_object_terminate(
vm_page_t p;
vm_object_t shadow_object;
+ assert(have_vm_object_lock(object));
+ assert(have_vm_object_cache_lock());
+
/*
* Make sure the object isn't already being terminated
*/
@@ -737,6 +741,8 @@ void vm_object_abort_activity(
{
vm_page_t p, old_p;
+ assert(have_vm_object_lock(object));
+
/*
* Abort all activity that would be waiting
* for a result on this memory object.
@@ -780,6 +786,7 @@ void vm_object_abort_activity(
object->pager_ready = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+ assert(have_vm_object_lock(object));
}
/*
@@ -878,6 +885,8 @@ void vm_object_deactivate_pages(
{
vm_page_t p;
+ assert(have_vm_object_lock(object));
+
struct rdxtree_iter iter;
rdxtree_for_each(&object->memt, &iter, p) {
vm_page_lock_queues();
@@ -1336,6 +1345,8 @@ kern_return_t vm_object_copy_call(
vm_object_t new_object;
vm_page_t p;
+ assert(have_vm_object_lock(src_object));
+
/*
* Create a memory object port to be associated
* with this new vm_object.
@@ -2171,6 +2182,7 @@ restart:
/*
* [At this point, the object must be locked]
*/
+ assert(have_vm_object_lock(object));
/*
* Wait for the work above to be done by the first
@@ -2300,6 +2312,8 @@ void vm_object_remove(
{
ipc_port_t port;
+ assert(have_vm_object_cache_lock());
+
if ((port = object->pager) != IP_NULL) {
if (ip_kotype(port) == IKOT_PAGER)
ipc_kobject_set(port, IKO_NULL,
@@ -2355,6 +2369,8 @@ void vm_object_collapse(
vm_page_t p, pp;
ipc_port_t old_name_port;
+ assert(have_vm_object_lock(object));
+
if (!vm_object_collapse_allowed)
return;
@@ -2723,6 +2739,9 @@ void vm_object_page_remove(
vm_offset_t end)
{
vm_page_t p;
+
+ assert(have_vm_object_lock(object));
+
struct rdxtree_iter iter;
rdxtree_for_each(&object->memt, &iter, p) {
if ((start <= p->offset) && (p->offset < end)) {
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 142404a..5805a58 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -378,8 +378,9 @@ MACRO_END
#define vm_object_sleep(event, object, interruptible) \
thread_sleep_lock((event_t)(event), &(object)->Lock, \
(interruptible))
-#define vm_object_lock_taken(object) lock_taken(&(object)->Lock)
#endif /* VM_OBJECT_DEBUG */
+#define have_vm_object_lock(object) \
+ (object == NULL || have_write_lock(&(object)->Lock))
/*
* Page cache accounting.
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 81ab3df..3fe6572 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -432,6 +432,7 @@ extern void vm_page_unwire(vm_page_t);
#define vm_page_lock_queues() lock_write(&vm_page_queue_lock)
#define vm_page_unlock_queues() lock_write_done(&vm_page_queue_lock)
+#define have_vm_page_queue_lock() have_write_lock(&vm_page_queue_lock)
#define VM_PAGE_QUEUES_REMOVE(mem) \
MACRO_BEGIN \
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index 51a6a0d..5e8630f 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -427,6 +427,7 @@ vm_pageout_page(
boolean_t precious_clean;
assert(m->busy);
+ assert(have_vm_object_lock(m->object));
/*
* Cleaning but not flushing a clean precious page is a
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 8b10e00..5959989 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -274,6 +274,7 @@ void vm_page_insert(
vm_object_t object,
vm_offset_t offset)
{
+ assert(have_vm_object_lock(object));
VM_PAGE_CHECK(mem);
if (mem->tabled)
@@ -337,6 +338,7 @@ void vm_page_replace(
struct vm_page *old;
void **slot;
+ assert(have_vm_object_lock(object));
VM_PAGE_CHECK(mem);
if (mem->tabled)
@@ -393,6 +395,7 @@ void vm_page_replace(
void vm_page_remove(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
assert(mem->tabled);
VM_PAGE_CHECK(mem);
@@ -425,6 +428,7 @@ vm_page_t vm_page_lookup(
vm_object_t object,
vm_offset_t offset)
{
+ assert(have_vm_object_lock(object));
return rdxtree_lookup(&object->memt, offset_key(offset));
}
@@ -441,6 +445,8 @@ void vm_page_rename(
vm_object_t new_object,
vm_offset_t new_offset)
{
+ assert(have_vm_object_lock(new_object));
+
/*
* Changes to mem->object require the page lock because
* the pageout daemon uses that lock to get the object.
@@ -534,6 +540,8 @@ boolean_t vm_page_convert(
struct vm_page *real_m, *fict_m, *old;
void **slot;
+ assert(have_vm_object_lock((*mp)->object));
+
fict_m = *mp;
assert(fict_m->fictitious);
@@ -731,6 +739,8 @@ vm_page_t vm_page_alloc(
{
vm_page_t mem;
+ assert(have_vm_object_lock(object));
+
mem = vm_page_grab(!object->internal);
if (mem == VM_PAGE_NULL)
return VM_PAGE_NULL;
@@ -753,6 +763,9 @@ vm_page_t vm_page_alloc(
void vm_page_free(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
+
if (mem->tabled)
vm_page_remove(mem);
VM_PAGE_QUEUES_REMOVE(mem);
@@ -793,6 +806,8 @@ void vm_page_free(
void vm_page_wire(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
@@ -814,6 +829,8 @@ void vm_page_wire(
void vm_page_unwire(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(mem);
if (--mem->wire_count == 0) {
@@ -837,6 +854,7 @@ void vm_page_unwire(
void vm_page_deactivate(
vm_page_t m)
{
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(m);
/*
@@ -871,6 +889,7 @@ void vm_page_deactivate(
void vm_page_activate(
vm_page_t m)
{
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(m);
if (m->inactive) {