Diffstat (limited to 'vm/vm_fault.c')
 -rw-r--r--  vm/vm_fault.c | 106
 1 file changed, 46 insertions(+), 60 deletions(-)
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index d9c3d7b..686156c 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -123,9 +123,9 @@ void vm_fault_init(void)
* "object" must be locked.
*/
void
-vm_fault_cleanup(object, top_page)
- vm_object_t object;
- vm_page_t top_page;
+vm_fault_cleanup(
+ vm_object_t object,
+ vm_page_t top_page)
{
vm_object_paging_end(object);
vm_object_unlock(object);
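
The pattern is the same throughout this change: old-style (K&R) definitions, where the parameter types are declared between the parameter list and the opening brace, become ANSI C prototype-style definitions. A toy illustration of the conversion (hypothetical functions, not code from vm_fault.c):

/* Old-style (K&R) definition: types follow the declarator, so callers
 * get no argument type checking from the compiler.
 */
int
scale_old(value, factor)
	int value;
	int factor;
{
	return value * factor;
}

/* Prototype-style (ANSI C89) equivalent, the form this change adopts:
 * types appear in the parameter list and are checked at every call site.
 */
int
scale_new(int value, int factor)
{
	return value * factor;
}
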
@@ -202,27 +202,23 @@ vm_fault_cleanup(object, top_page)
* The "result_page" is also left busy. It is not removed
* from the pageout queues.
*/
-vm_fault_return_t vm_fault_page(first_object, first_offset,
- fault_type, must_be_resident, interruptible,
- protection,
- result_page, top_page,
- resume, continuation)
+vm_fault_return_t vm_fault_page(
/* Arguments: */
- vm_object_t first_object; /* Object to begin search */
- vm_offset_t first_offset; /* Offset into object */
- vm_prot_t fault_type; /* What access is requested */
- boolean_t must_be_resident;/* Must page be resident? */
- boolean_t interruptible; /* May fault be interrupted? */
+ vm_object_t first_object, /* Object to begin search */
+ vm_offset_t first_offset, /* Offset into object */
+ vm_prot_t fault_type, /* What access is requested */
+ boolean_t must_be_resident,/* Must page be resident? */
+ boolean_t interruptible, /* May fault be interrupted? */
/* Modifies in place: */
- vm_prot_t *protection; /* Protection for mapping */
+ vm_prot_t *protection, /* Protection for mapping */
/* Returns: */
- vm_page_t *result_page; /* Page found, if successful */
- vm_page_t *top_page; /* Page in top object, if
+ vm_page_t *result_page, /* Page found, if successful */
+ vm_page_t *top_page, /* Page in top object, if
* not result_page.
*/
/* More arguments: */
- boolean_t resume; /* We are restarting. */
- void (*continuation)(); /* Continuation for blocking. */
+ boolean_t resume, /* We are restarting. */
+ void (*continuation)()) /* Continuation for blocking. */
{
vm_page_t m;
vm_object_t object;
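
Note that the continuation argument keeps its empty parameter list: void (*continuation)() is still an unprototyped function pointer after the conversion, so argument types passed through it remain unchecked. A hypothetical illustration of the difference (example declarations, not taken from this file):

void step(int phase);			/* some continuation-style routine */

void (*cont_unprototyped)();		/* unspecified argument list, as in the diff */
void (*cont_prototyped)(void);		/* takes no arguments at all */

void
example(void)
{
	cont_unprototyped = step;	/* accepted; argument types are unchecked */
	/* cont_prototyped = step; */	/* rejected: incompatible pointer types */
}
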
@@ -1149,14 +1145,13 @@ vm_fault_continue(void)
/*NOTREACHED*/
}
-kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
- resume, continuation)
- vm_map_t map;
- vm_offset_t vaddr;
- vm_prot_t fault_type;
- boolean_t change_wiring;
- boolean_t resume;
- void (*continuation)();
+kern_return_t vm_fault(
+ vm_map_t map,
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+ boolean_t change_wiring,
+ boolean_t resume,
+ void (*continuation)())
{
vm_map_version_t version; /* Map version for verification */
boolean_t wired; /* Should mapping be wired down? */
@@ -1500,9 +1495,9 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
*
* Wire down a range of virtual addresses in a map.
*/
-void vm_fault_wire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_wire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
vm_offset_t va;
@@ -1536,9 +1531,9 @@ void vm_fault_wire(map, entry)
*
* Unwire a range of virtual addresses in a map.
*/
-void vm_fault_unwire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_unwire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
vm_offset_t va;
pmap_t pmap;
@@ -1625,10 +1620,10 @@ void vm_fault_unwire(map, entry)
* other than the common case will return KERN_FAILURE, and the caller
* is expected to call vm_fault().
*/
-kern_return_t vm_fault_wire_fast(map, va, entry)
- vm_map_t map;
- vm_offset_t va;
- vm_map_entry_t entry;
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry)
{
vm_object_t object;
vm_offset_t offset;
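
The block comment above states the contract: anything outside the common case returns KERN_FAILURE, and the caller is expected to fall back to vm_fault(). A minimal sketch of that fallback using the new prototypes; the fault type, flags, and null continuation below are assumptions for illustration, not the actual loop in vm_fault_wire():

/* Hypothetical kernel-side helper; the vm_fault() arguments are assumed. */
static void
wire_one_page(vm_map_t map, vm_offset_t va, vm_map_entry_t entry)
{
	if (vm_fault_wire_fast(map, va, entry) != KERN_SUCCESS)
		(void) vm_fault(map, va, VM_PROT_NONE,
				TRUE,			/* change_wiring */
				FALSE,			/* not resuming */
				(void (*)()) 0);	/* no continuation */
}
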
@@ -1774,9 +1769,9 @@ kern_return_t vm_fault_wire_fast(map, va, entry)
* Release a page used by vm_fault_copy.
*/
-void vm_fault_copy_cleanup(page, top_page)
- vm_page_t page;
- vm_page_t top_page;
+void vm_fault_copy_cleanup(
+ vm_page_t page,
+ vm_page_t top_page)
{
vm_object_t object = page->object;
@@ -1817,23 +1812,14 @@ void vm_fault_copy_cleanup(page, top_page)
* requested.
*/
kern_return_t vm_fault_copy(
- src_object,
- src_offset,
- src_size,
- dst_object,
- dst_offset,
- dst_map,
- dst_version,
- interruptible
- )
- vm_object_t src_object;
- vm_offset_t src_offset;
- vm_size_t *src_size; /* INOUT */
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_map_t dst_map;
- vm_map_version_t *dst_version;
- boolean_t interruptible;
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t *src_size, /* INOUT */
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_map_t dst_map,
+ vm_map_version_t *dst_version,
+ boolean_t interruptible)
{
vm_page_t result_page;
vm_prot_t prot;
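
The src_size argument is marked INOUT: the caller passes the amount it wants copied, and the routine writes back the amount actually handled, which may be less than requested. A hypothetical sketch of that convention in isolation (example names, not Mach API):

#include <stddef.h>
#include <string.h>

/* Illustration of the INOUT size convention only; not code from vm_fault.c. */
static int
copy_block(char *dst, const char *src, size_t *size)	/* size is INOUT */
{
	size_t done = *size;

	if (done > 64)		/* pretend only 64 bytes can be handled now */
		done = 64;
	memcpy(dst, src, done);
	*size = done;		/* report the amount actually copied */
	return 0;
}
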
@@ -2014,10 +2000,10 @@ kern_return_t vm_fault_copy(
* XXX Untested. Also unused. Eventually, this technology
* could be used in vm_fault_copy() to advantage.
*/
-vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_page_t *result_page; /* OUT */
+vm_fault_return_t vm_fault_page_overwrite(
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_page_t *result_page) /* OUT */
{
vm_page_t dst_page;