author    Justus Winter <4winter@informatik.uni-hamburg.de>  2015-08-16 02:18:47 +0200
committer Justus Winter <4winter@informatik.uni-hamburg.de>  2015-08-28 15:47:56 +0200
commit    787a6034170b59d55afe8d0a12cb0904f20cb74d
tree      d20fcc9b98882c7bb62500185cbfb08006fec26a
parent    069a8a3decf5981cd420014384ead470eac8561c
codify locking contracts
Conflicts: vm/vm_page.h
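
The pattern applied throughout is simple: locking contracts that were previously stated only in comments ("the port must be locked") become assertions that debug builds check at run time. A minimal, self-contained sketch of the idea, assuming a simplified lock with a writer flag; gnumach's real primitives live in kern/lock.h, and the implementation of have_write_lock() shown here is an assumption, not part of this diff:

#include <assert.h>

/* Simplified stand-in for gnumach's struct lock (kern/lock.h). */
struct lock {
	int want_write;		/* a writer holds it (or is waiting) */
};

/* Hypothetical predicate: true if the write lock is held.  Without
 * owner tracking it cannot distinguish "locked by us" from "locked
 * by another thread", but it still catches the common bug of
 * entering a function with no lock held at all. */
#define have_write_lock(l)	((l)->want_write != 0)

struct port {
	struct lock io_lock_data;
	int qlimit;
};

#define have_ip_lock(p)		have_write_lock(&(p)->io_lock_data)

/* Before: the contract was a comment.  After: it is checked. */
void port_set_qlimit(struct port *p, int qlimit)
{
	assert(have_ip_lock(p));	/* locking contract, codified */
	p->qlimit = qlimit;
}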
 ipc/ipc_mqueue.c  |  7
 ipc/ipc_mqueue.h  |  3
 ipc/ipc_object.h  |  3
 ipc/ipc_port.c    | 25
 ipc/ipc_port.h    |  3
 ipc/ipc_pset.h    |  1
 ipc/ipc_right.c   |  2
 ipc/ipc_space.h   | 12
 ipc/mach_msg.c    | 19
 kern/exception.c  |  4
 vm/vm_fault.c     |  3
 vm/vm_map.c       |  4
 vm/vm_map.h       |  3
 vm/vm_object.c    | 19
 vm/vm_object.h    |  3
 vm/vm_page.h      |  1
 vm/vm_pageout.c   |  1
 vm/vm_resident.c  | 20
 18 files changed, 129 insertions(+), 4 deletions(-)
diff --git a/ipc/ipc_mqueue.c b/ipc/ipc_mqueue.c
index 9138aec..22d8f43 100644
--- a/ipc/ipc_mqueue.c
+++ b/ipc/ipc_mqueue.c
@@ -88,6 +88,9 @@ ipc_mqueue_move(
ipc_kmsg_t kmsg, next;
ipc_thread_t th;
+ assert(have_imq_lock(dest));
+ assert(have_imq_lock(source));
+
oldq = &source->imq_messages;
newq = &dest->imq_messages;
blockedq = &dest->imq_threads;
@@ -146,6 +149,8 @@ ipc_mqueue_changed(
{
ipc_thread_t th;
+ assert(have_imq_lock(mqueue));
+
while ((th = ipc_thread_dequeue(&mqueue->imq_threads)) != ITH_NULL) {
th->ith_state = mr;
thread_go(th);
@@ -535,6 +540,8 @@ ipc_mqueue_receive(
if (resume)
goto after_thread_block;
+ assert(have_imq_lock(mqueue));
+
for (;;) {
kmsg = ipc_kmsg_queue_first(kmsgs);
if (kmsg != IKM_NULL) {
diff --git a/ipc/ipc_mqueue.h b/ipc/ipc_mqueue.h
index e7d5670..761e67d 100644
--- a/ipc/ipc_mqueue.h
+++ b/ipc/ipc_mqueue.h
@@ -54,6 +54,9 @@ typedef struct ipc_mqueue {
#define imq_lock(mq) lock_write(&(mq)->imq_lock_data)
#define imq_lock_try(mq) lock_try_write(&(mq)->imq_lock_data)
#define imq_unlock(mq) lock_write_done(&(mq)->imq_lock_data)
+#define have_imq_lock(mq) have_write_lock(&(mq)->imq_lock_data)
+#define imq_lock_surrender(mq) lock_write_surrender(&(mq)->imq_lock_data)
+#define imq_lock_steal(mq) lock_write_steal(&(mq)->imq_lock_data)
extern void
ipc_mqueue_init(ipc_mqueue_t);
diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
index 8504a23..be85dd6 100644
--- a/ipc/ipc_object.h
+++ b/ipc/ipc_object.h
@@ -87,8 +87,11 @@ extern struct kmem_cache ipc_object_caches[IOT_NUMBER];
#define io_lock_init(io) lock_init(&(io)->io_lock_data, TRUE)
#define io_lock(io) lock_write(&(io)->io_lock_data)
+#define io_lock_surrender(io) lock_write_surrender(&(io)->io_lock_data)
+#define io_lock_steal(io) lock_write_steal(&(io)->io_lock_data)
#define io_lock_try(io) lock_try_write(&(io)->io_lock_data)
#define io_unlock(io) lock_write_done(&(io)->io_lock_data)
+#define have_io_lock(io) have_write_lock(&(io)->io_lock_data)
#define io_check_unlock(io) \
MACRO_BEGIN \
diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
index bc9ea58..2a45fe0 100644
--- a/ipc/ipc_port.c
+++ b/ipc/ipc_port.c
@@ -103,6 +103,7 @@ ipc_port_dnrequest(
ipc_port_request_t ipr, table;
ipc_port_request_index_t index;
+ assert(have_ip_lock(port));
assert(ip_active(port));
assert(name != MACH_PORT_NULL);
assert(soright != IP_NULL);
@@ -147,6 +148,7 @@ ipc_port_dngrow(ipc_port_t port)
ipc_table_size_t its;
ipc_port_request_t otable, ntable;
+ assert(have_ip_lock(port));
assert(ip_active(port));
otable = port->ip_dnrequests;
@@ -240,6 +242,7 @@ ipc_port_dncancel(
ipc_port_request_t ipr, table;
ipc_port_t dnrequest;
+ assert(have_ip_lock(port));
assert(ip_active(port));
assert(name != MACH_PORT_NULL);
assert(index != 0);
@@ -280,6 +283,7 @@ ipc_port_pdrequest(
{
ipc_port_t previous;
+ assert(have_ip_lock(port));
assert(ip_active(port));
previous = port->ip_pdrequest;
@@ -311,6 +315,7 @@ ipc_port_nsrequest(
ipc_port_t previous;
mach_port_mscount_t mscount;
+ assert(have_ip_lock(port));
assert(ip_active(port));
previous = port->ip_nsrequest;
@@ -344,6 +349,7 @@ ipc_port_set_qlimit(
ipc_port_t port,
mach_port_msgcount_t qlimit)
{
+ assert(have_ip_lock(port));
assert(ip_active(port));
/* wake up senders allowed by the new qlimit */
@@ -383,6 +389,9 @@ ipc_port_set_qlimit(
ipc_mqueue_t
ipc_port_lock_mqueue(ipc_port_t port)
{
+ assert(have_ip_lock(port));
+ assert(ip_active(port));
+
if (port->ip_pset != IPS_NULL) {
ipc_pset_t pset = port->ip_pset;
@@ -417,6 +426,9 @@ ipc_port_set_seqno(
{
ipc_mqueue_t mqueue;
+ assert(have_ip_lock(port));
+ assert(ip_active(port));
+
mqueue = ipc_port_lock_mqueue(port);
port->ip_seqno = seqno;
imq_unlock(mqueue);
@@ -435,6 +447,9 @@ ipc_port_set_protected_payload(ipc_port_t port, unsigned long payload)
{
ipc_mqueue_t mqueue;
+ assert(have_ip_lock(port));
+ assert(ip_active(port));
+
mqueue = ipc_port_lock_mqueue(port);
port->ip_protected_payload = payload;
ipc_port_flag_protected_payload_set(port);
@@ -454,6 +469,9 @@ ipc_port_clear_protected_payload(ipc_port_t port)
{
ipc_mqueue_t mqueue;
+ assert(have_ip_lock(port));
+ assert(ip_active(port));
+
mqueue = ipc_port_lock_mqueue(port);
ipc_port_flag_protected_payload_clear(port);
imq_unlock(mqueue);
@@ -474,6 +492,7 @@ ipc_port_clear_receiver(
{
ipc_pset_t pset;
+ assert(have_ip_lock(port));
assert(ip_active(port));
pset = port->ip_pset;
@@ -566,7 +585,7 @@ ipc_port_alloc(
if (kr != KERN_SUCCESS)
return kr;
- /* port is locked */
+ assert(have_ip_lock(port)); /* port is locked */
ipc_port_init(port, space, name);
@@ -604,7 +623,7 @@ ipc_port_alloc_name(
name, (ipc_object_t *) &port);
if (kr != KERN_SUCCESS)
return kr;
- /* port is locked */
+ assert(have_ip_lock(port)); /* port is locked */
ipc_port_init(port, space, name);
@@ -636,6 +655,7 @@ ipc_port_destroy(
ipc_thread_t sender;
ipc_port_request_t dnrequests;
+ assert(have_ip_lock(port));
assert(ip_active(port));
/* port->ip_receiver_name is garbage */
/* port->ip_receiver/port->ip_destination is garbage */
@@ -916,6 +936,7 @@ ipc_port_lookup_notify(
ipc_port_t port;
ipc_entry_t entry;
+ assert(have_is_lock(space));
assert(space->is_active);
entry = ipc_entry_lookup(space, name);
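
Several of the assertions above sit in functions that rely on a fixed lock ordering: the caller takes the port lock first, then ipc_port_lock_mqueue() locks the relevant message queue (the port's own, or that of the port set it belongs to) and returns with it held. A hedged sketch of that ordering, condensed from ipc_port_set_seqno() above; the caller-side locking and the unlock order shown are assumptions:

/* Sketch: the port -> mqueue lock ordering the assertions encode. */
ip_lock(port);				/* caller: port lock first */

assert(have_ip_lock(port));		/* entry contract, now checked */
assert(ip_active(port));

mqueue = ipc_port_lock_mqueue(port);	/* then the mqueue, returned locked */
port->ip_seqno = seqno;			/* update made under both locks */
imq_unlock(mqueue);			/* mqueue dropped here... */

ip_unlock(port);			/* ...port lock by the caller */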
diff --git a/ipc/ipc_port.h b/ipc/ipc_port.h
index 97e6068..3595dfd 100644
--- a/ipc/ipc_port.h
+++ b/ipc/ipc_port.h
@@ -119,8 +119,11 @@ struct ipc_port {
#define ip_active(port) io_active(&(port)->ip_object)
#define ip_lock_init(port) io_lock_init(&(port)->ip_object)
#define ip_lock(port) io_lock(&(port)->ip_object)
+#define ip_lock_surrender(port) io_lock_surrender(&(port)->ip_object)
+#define ip_lock_steal(port) io_lock_steal(&(port)->ip_object)
#define ip_lock_try(port) io_lock_try(&(port)->ip_object)
#define ip_unlock(port) io_unlock(&(port)->ip_object)
+#define have_ip_lock(port) have_io_lock(&(port)->ip_object)
#define ip_check_unlock(port) io_check_unlock(&(port)->ip_object)
#define ip_reference(port) io_reference(&(port)->ip_object)
#define ip_release(port) io_release(&(port)->ip_object)
diff --git a/ipc/ipc_pset.h b/ipc/ipc_pset.h
index e9936fe..11c99c3 100644
--- a/ipc/ipc_pset.h
+++ b/ipc/ipc_pset.h
@@ -62,6 +62,7 @@ typedef struct ipc_pset {
#define ips_lock_try(pset) io_lock_try(&(pset)->ips_object)
#define ips_unlock(pset) io_unlock(&(pset)->ips_object)
#define ips_check_unlock(pset) io_check_unlock(&(pset)->ips_object)
+#define have_ips_lock(pset) have_io_lock(&(pset)->ips_object)
#define ips_reference(pset) io_reference(&(pset)->ips_object)
#define ips_release(pset) io_release(&(pset)->ips_object)
diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
index 773b3b1..2e73cc3 100644
--- a/ipc/ipc_right.c
+++ b/ipc/ipc_right.c
@@ -114,6 +114,7 @@ ipc_right_reverse(
/* would switch on io_otype to handle multiple types of object */
+ assert(have_is_lock(space));
assert(space->is_active);
assert(io_otype(object) == IOT_PORT);
@@ -311,6 +312,7 @@ ipc_right_dncancel(
{
ipc_port_t dnrequest;
+ assert(have_is_write_lock(space));
assert(ip_active(port));
assert(port == (ipc_port_t) entry->ie_object);
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
index a2aac40..fcc36b0 100644
--- a/ipc/ipc_space.h
+++ b/ipc/ipc_space.h
@@ -118,6 +118,10 @@ MACRO_END
#define is_write_to_read_lock(is) lock_write_to_read(&(is)->is_lock_data)
+#define have_is_lock(is) have_lock(&(is)->is_lock_data)
+#define have_is_read_lock(is) have_read_lock(&(is)->is_lock_data)
+#define have_is_write_lock(is) have_write_lock(&(is)->is_lock_data)
+
extern void ipc_space_reference(struct ipc_space *space);
extern void ipc_space_release(struct ipc_space *space);
@@ -147,6 +151,8 @@ ipc_entry_lookup(
ipc_entry_t entry;
assert(space->is_active);
+ assert(have_is_lock(space));
+
entry = rdxtree_lookup(&space->is_map, (rdxtree_key_t) name);
if (entry != IE_NULL
&& IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
@@ -177,6 +183,7 @@ ipc_entry_get(
ipc_entry_t free_entry;
assert(space->is_active);
+ assert(have_is_write_lock(space));
/* Get entry from the free list. */
free_entry = space->is_free_list;
@@ -233,6 +240,7 @@ ipc_entry_dealloc(
ipc_entry_t entry)
{
assert(space->is_active);
+ assert(have_is_write_lock(space));
assert(entry->ie_object == IO_NULL);
assert(entry->ie_request == 0);
@@ -267,6 +275,7 @@ ipc_reverse_insert(ipc_space_t space,
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
+ assert(have_is_write_lock(space));
return (kern_return_t) rdxtree_insert(&space->is_reverse_map,
KEY(obj), entry);
}
@@ -279,6 +288,7 @@ ipc_reverse_remove(ipc_space_t space,
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
+ assert(have_is_write_lock(space));
return rdxtree_remove(&space->is_reverse_map, KEY(obj));
}
@@ -288,6 +298,7 @@ static inline void
ipc_reverse_remove_all(ipc_space_t space)
{
assert(space != IS_NULL);
+ assert(have_is_write_lock(space));
rdxtree_remove_all(&space->is_reverse_map);
assert(space->is_reverse_map.height == 0);
assert(space->is_reverse_map.root == NULL);
@@ -302,6 +313,7 @@ ipc_reverse_lookup(ipc_space_t space,
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
+ assert(have_is_lock(space));
return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
}
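
The space gets three predicates of different strength, matching how its read-write lock is actually used: pure lookups (ipc_entry_lookup, ipc_reverse_lookup) only need the lock in either mode, while anything that mutates the space (ipc_entry_get, ipc_entry_dealloc, the reverse-map updates) must hold it for writing. A sketch of the intended pairing, assuming the existing is_read_lock()/is_write_lock() macro family from ipc_space.h:

/* Read side: lookups may run under the shared (read) lock. */
is_read_lock(space);
assert(have_is_lock(space));		/* satisfied by read OR write lock */
entry = ipc_entry_lookup(space, name);
is_read_unlock(space);

/* Write side: mutations need the exclusive (write) lock. */
is_write_lock(space);
assert(have_is_write_lock(space));	/* a read lock would not suffice */
kr = ipc_entry_get(space, &name, &entry);
is_write_unlock(space);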
diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
index 371d725..6bd0d29 100644
--- a/ipc/mach_msg.c
+++ b/ipc/mach_msg.c
@@ -742,6 +742,8 @@ mach_msg_trap(
* and not abort when we try to lock dest_mqueue.
*/
+ assert(have_ip_lock(dest_port));
+ assert(have_imq_lock(rcv_mqueue));
assert(ip_active(dest_port));
assert(dest_port->ip_receiver != ipc_space_kernel);
assert((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
@@ -791,19 +793,30 @@ mach_msg_trap(
self->ith_object = rcv_object;
self->ith_mqueue = rcv_mqueue;
+ ip_lock_surrender(dest_port);
+ imq_lock_surrender(rcv_mqueue);
+ imq_lock_surrender(dest_mqueue);
+
if ((receiver->swap_func == (void (*)()) mach_msg_continue) &&
thread_handoff(self, mach_msg_continue, receiver)) {
assert(current_thread() == receiver);
+ ip_lock_steal(dest_port);
+ imq_lock_steal(rcv_mqueue);
+ imq_lock_steal(dest_mqueue);
/*
* We can use the optimized receive code,
* because the receiver is using no options.
*/
+
} else if ((receiver->swap_func ==
(void (*)()) exception_raise_continue) &&
thread_handoff(self, mach_msg_continue, receiver)) {
counter(c_mach_msg_trap_block_exc++);
assert(current_thread() == receiver);
+ ip_lock_steal(dest_port);
+ imq_lock_steal(rcv_mqueue);
+ imq_lock_steal(dest_mqueue);
/*
* We are a reply message coming back through
@@ -830,6 +843,9 @@ mach_msg_trap(
} else if ((send_size <= receiver->ith_msize) &&
thread_handoff(self, mach_msg_continue, receiver)) {
assert(current_thread() == receiver);
+ ip_lock_steal(dest_port);
+ imq_lock_steal(rcv_mqueue);
+ imq_lock_steal(dest_mqueue);
if ((receiver->swap_func ==
(void (*)()) mach_msg_receive_continue) &&
@@ -875,6 +891,9 @@ mach_msg_trap(
* The receiver can't accept the message,
* or we can't switch to the receiver.
*/
+ ip_lock_steal(dest_port);
+ imq_lock_steal(rcv_mqueue);
+ imq_lock_steal(dest_mqueue);
imq_unlock(dest_mqueue);
goto abort_send_receive;
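
The surrender/steal pairs bracket thread_handoff() because all three locks stay held across the switch from the sending thread to the receiver: the thread that will eventually unlock them changes, so lock ownership has to be handed over explicitly for the have_*_lock() checks to stay meaningful. This assumes lock_write_surrender()/lock_write_steal() transfer ownership bookkeeping without releasing the lock itself. Condensed from the hunk above, with comments added and the middle branches elided:

ip_lock_surrender(dest_port);	/* self gives up ownership of all  */
imq_lock_surrender(rcv_mqueue);	/* three locks without releasing   */
imq_lock_surrender(dest_mqueue);/* them: they stay locked          */

if (thread_handoff(self, mach_msg_continue, receiver)) {
	/* Execution continues as the receiver thread. */
	assert(current_thread() == receiver);
	ip_lock_steal(dest_port);	/* the receiver claims ownership, */
	imq_lock_steal(rcv_mqueue);	/* so later have_*_lock() checks  */
	imq_lock_steal(dest_mqueue);	/* and unlocks match up           */
	/* ... fast-path receive continues ... */
} else {
	/* Handoff failed: self is still running and must take the
	 * locks back before it can unlock them. */
	ip_lock_steal(dest_port);
	imq_lock_steal(rcv_mqueue);
	imq_lock_steal(dest_mqueue);
	imq_unlock(dest_mqueue);
	/* ... abort_send_receive ... */
}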
diff --git a/kern/exception.c b/kern/exception.c
index 63a63d6..5338274 100644
--- a/kern/exception.c
+++ b/kern/exception.c
@@ -454,6 +454,8 @@ exception_raise(
(sizeof(struct mach_exception) <= receiver->ith_msize) &&
((receiver->ith_option & MACH_RCV_NOTIFY) == 0))) ||
!thread_handoff(self, exception_raise_continue, receiver)) {
+ imq_lock_steal(reply_mqueue);
+ imq_lock_steal(dest_mqueue);
imq_unlock(reply_mqueue);
imq_unlock(dest_mqueue);
goto slow_exception_raise;
@@ -461,6 +463,8 @@ exception_raise(
counter(c_exception_raise_block++);
assert(current_thread() == receiver);
+ imq_lock_steal(reply_mqueue);
+ imq_lock_steal(dest_mqueue);
/*
* We need to finish preparing self for its
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 101ebce..40d1cbc 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -127,6 +127,7 @@ vm_fault_cleanup(
vm_object_t object,
vm_page_t top_page)
{
+ assert(have_vm_object_lock(object));
vm_object_paging_end(object);
vm_object_unlock(object);
@@ -254,6 +255,8 @@ vm_fault_return_t vm_fault_page(
goto after_thread_block;
}
+ assert(have_vm_object_lock(first_object));
+
vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
vm_stat.faults++; /* needs lock XXX */
current_task()->faults++;
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 9098dfd..5099283 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -531,6 +531,8 @@ kern_return_t vm_map_find_entry(
vm_offset_t start;
vm_offset_t end;
+ assert(have_vm_map_lock(map));
+
/*
* Look for the first possible address;
* if there's already something at this
@@ -601,6 +603,7 @@ kern_return_t vm_map_find_entry(
*
* the map should be locked.
*/
+ assert(have_vm_map_lock(map));
*address = start;
@@ -918,6 +921,7 @@ kern_return_t vm_map_enter(
*
* the map should be locked.
*/
+ assert(have_vm_map_lock(map));
/*
* See whether we can avoid creating a new entry (and object) by
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 9b31f90..c9f828a 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -358,6 +358,9 @@ MACRO_END
lock_set_recursive(&(map)->lock)
#define vm_map_lock_clear_recursive(map) \
lock_clear_recursive(&(map)->lock)
+#define have_vm_map_lock(map) have_lock(&(map)->lock)
+#define have_vm_map_read_lock(map) have_read_lock(&(map)->lock)
+#define have_vm_map_write_lock(map) have_write_lock(&(map)->lock)
/*
* Exported procedures that operate on vm_map_t.
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 4b31482..836f5ef 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -188,6 +188,8 @@ struct lock vm_object_cache_lock_data;
lock_try_write(&vm_object_cache_lock_data)
#define vm_object_cache_unlock() \
lock_write_done(&vm_object_cache_lock_data)
+#define have_vm_object_cache_lock() \
+ have_write_lock(&vm_object_cache_lock_data)
/*
* Number of physical pages referenced by cached objects.
@@ -516,7 +518,6 @@ void vm_object_deallocate(
* Destroy the object; the cache lock will
* be released in the process.
*/
-
vm_object_terminate(object);
/*
@@ -554,6 +555,9 @@ void vm_object_terminate(
vm_external_t existence_info;
#endif /* MACH_PAGEMAP */
+ assert(have_vm_object_lock(object));
+ assert(have_vm_object_cache_lock());
+
/*
* Make sure the object isn't already being terminated
*/
@@ -754,6 +758,8 @@ void vm_object_abort_activity(
vm_page_t p;
vm_page_t next;
+ assert(have_vm_object_lock(object));
+
/*
* Abort all activity that would be waiting
* for a result on this memory object.
@@ -791,6 +797,7 @@ void vm_object_abort_activity(
object->pager_ready = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+ assert(have_vm_object_lock(object));
}
/*
@@ -889,6 +896,7 @@ void vm_object_deactivate_pages(
{
vm_page_t p;
+ assert(have_vm_object_lock(object));
queue_iterate(&object->memq, p, vm_page_t, listq) {
vm_page_lock_queues();
if (!p->busy)
@@ -1348,6 +1356,8 @@ kern_return_t vm_object_copy_call(
vm_object_t new_object;
vm_page_t p;
+ assert(have_vm_object_lock(src_object));
+
/*
* Create a memory object port to be associated
* with this new vm_object.
@@ -2196,6 +2206,7 @@ restart:
/*
* [At this point, the object must be locked]
*/
+ assert(have_vm_object_lock(object));
/*
* Wait for the work above to be done by the first
@@ -2325,6 +2336,8 @@ void vm_object_remove(
{
ipc_port_t port;
+ assert(have_vm_object_cache_lock());
+
if ((port = object->pager) != IP_NULL) {
if (ip_kotype(port) == IKOT_PAGER)
ipc_kobject_set(port, IKO_NULL,
@@ -2380,6 +2393,8 @@ void vm_object_collapse(
vm_page_t p, pp;
ipc_port_t old_name_port;
+ assert(have_vm_object_lock(object));
+
if (!vm_object_collapse_allowed)
return;
@@ -2729,6 +2744,8 @@ void vm_object_page_remove(
{
vm_page_t p, next;
+ assert(have_vm_object_lock(object));
+
/*
* One and two page removals are most popular.
* The factor of 16 here is somewhat arbitrary.
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 3c9055f..01ce4ad 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -376,8 +376,9 @@ MACRO_END
#define vm_object_sleep(event, object, interruptible) \
thread_sleep_lock((event_t)(event), &(object)->Lock, \
(interruptible))
-#define vm_object_lock_taken(object) lock_taken(&(object)->Lock)
#endif /* VM_OBJECT_DEBUG */
+#define have_vm_object_lock(object) \
+ (object == NULL || have_write_lock(&(object)->Lock))
/*
* Page cache accounting.
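
Unlike the other predicates, have_vm_object_lock() (which replaces the debug-only vm_object_lock_taken()) treats a null object as trivially locked. That lets callers assert on page->object without a separate null check, as vm_pageout_page() and the vm_resident.c functions below do. A minimal, self-contained illustration with hypothetical local names:

#include <assert.h>
#include <stddef.h>

struct lock { int want_write; };		/* simplified, as before */
#define have_write_lock(l)	((l)->want_write != 0)

struct vm_object { struct lock Lock; };
struct vm_page { struct vm_object *object; };

/* NULL-tolerant, as in the hunk above: a page not (yet) entered into
 * any object passes the check without dereferencing a null pointer. */
#define have_vm_object_lock(object) \
	((object) == NULL || have_write_lock(&(object)->Lock))

void pageout_page(struct vm_page *m)
{
	assert(have_vm_object_lock(m->object));	/* safe even if NULL */
	/* ... */
}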
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 90599a1..dd571e2 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -293,6 +293,7 @@ extern unsigned int vm_page_info(
#define vm_page_lock_queues() lock_write(&vm_page_queue_lock)
#define vm_page_unlock_queues() lock_write_done(&vm_page_queue_lock)
+#define have_vm_page_queue_lock() have_write_lock(&vm_page_queue_lock)
#define VM_PAGE_QUEUES_REMOVE(mem) \
MACRO_BEGIN \
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index b676c7b..29cfca7 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -424,6 +424,7 @@ vm_pageout_page(
boolean_t precious_clean;
assert(m->busy);
+ assert(have_vm_object_lock(m->object));
/*
* Cleaning but not flushing a clean precious page is a
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 427c8f5..4e1eed5 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -489,6 +489,7 @@ void vm_page_insert(
{
vm_page_bucket_t *bucket;
+ assert(have_vm_object_lock(object));
VM_PAGE_CHECK(mem);
if (mem->tabled)
@@ -561,6 +562,7 @@ void vm_page_replace(
{
vm_page_bucket_t *bucket;
+ assert(have_vm_object_lock(object));
VM_PAGE_CHECK(mem);
if (mem->tabled)
@@ -651,6 +653,7 @@ void vm_page_remove(
vm_page_bucket_t *bucket;
vm_page_t this;
+ assert(have_vm_object_lock(mem->object));
assert(mem->tabled);
VM_PAGE_CHECK(mem);
@@ -710,6 +713,8 @@ vm_page_t vm_page_lookup(
vm_page_t mem;
vm_page_bucket_t *bucket;
+ assert(have_vm_object_lock(object));
+
/*
* Search the hash table for this object/offset pair
*/
@@ -739,6 +744,8 @@ void vm_page_rename(
vm_object_t new_object,
vm_offset_t new_offset)
{
+ assert(have_vm_object_lock(new_object));
+
/*
* Changes to mem->object require the page lock because
* the pageout daemon uses that lock to get the object.
@@ -844,6 +851,8 @@ boolean_t vm_page_convert(
{
vm_page_t real_m;
+ assert(have_vm_object_lock(m->object));
+
real_m = vm_page_grab(external);
if (real_m == VM_PAGE_NULL)
return FALSE;
@@ -1259,6 +1268,8 @@ vm_page_t vm_page_alloc(
{
vm_page_t mem;
+ assert(have_vm_object_lock(object));
+
mem = vm_page_grab(!object->internal);
if (mem == VM_PAGE_NULL)
return VM_PAGE_NULL;
@@ -1281,6 +1292,9 @@ vm_page_t vm_page_alloc(
void vm_page_free(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
+
if (mem->free)
panic("vm_page_free");
@@ -1332,6 +1346,8 @@ void vm_page_free(
void vm_page_wire(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
@@ -1353,6 +1369,8 @@ void vm_page_wire(
void vm_page_unwire(
vm_page_t mem)
{
+ assert(have_vm_object_lock(mem->object));
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(mem);
if (--mem->wire_count == 0) {
@@ -1376,6 +1394,7 @@ void vm_page_unwire(
void vm_page_deactivate(
vm_page_t m)
{
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(m);
/*
@@ -1410,6 +1429,7 @@ void vm_page_deactivate(
void vm_page_activate(
vm_page_t m)
{
+ assert(have_vm_page_queue_lock());
VM_PAGE_CHECK(m);
if (m->inactive) {