Diffstat (limited to 'vm')
-rw-r--r--  vm/memory_object.c  138
-rw-r--r--  vm/vm_debug.c        31
-rw-r--r--  vm/vm_external.c     14
-rw-r--r--  vm/vm_fault.c       106
-rw-r--r--  vm/vm_kern.c        180
-rw-r--r--  vm/vm_map.c         342
-rw-r--r--  vm/vm_pageout.c      20
-rw-r--r--  vm/vm_user.c        117
8 files changed, 454 insertions, 494 deletions
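Every hunk below follows the same mechanical pattern: an old-style (K&R) function definition, where parameter types are declared in a separate list after the parameter names, is rewritten as an ANSI C definition with the types inline, with no change in behavior. A minimal sketch of the pattern, using a hypothetical example_op() rather than any function from this diff, and assuming the usual Mach types (vm_map_t, vm_offset_t, vm_size_t) are in scope:

/* Before: K&R definition. Parameter types follow the name list,
 * so callers get no compile-time argument checking. */
kern_return_t example_op(map, addr, size)
	vm_map_t	map;
	vm_offset_t	addr;
	vm_size_t	size;
{
	return KERN_SUCCESS;
}

/* After: ANSI definition. The types sit in the parameter list
 * itself, which also serves as a checked prototype. */
kern_return_t example_op(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	return KERN_SUCCESS;
}

With prototypes in scope, the compiler can verify argument count and types at every call site and apply the proper implicit conversions, which K&R definitions do not allow.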
diff --git a/vm/memory_object.c b/vm/memory_object.c
index 024856d..097ed23 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -82,16 +82,15 @@ decl_simple_lock_data(,memory_manager_default_lock)
* argument conversion. Explicit deallocation is necessary.
*/
-kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
- lock_value, precious, reply_to, reply_to_type)
- vm_object_t object;
- vm_offset_t offset;
- vm_map_copy_t data_copy;
- unsigned int data_cnt;
- vm_prot_t lock_value;
- boolean_t precious;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+kern_return_t memory_object_data_supply(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_map_copy_t data_copy,
+ unsigned int data_cnt,
+ vm_prot_t lock_value,
+ boolean_t precious,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
kern_return_t result = KERN_SUCCESS;
vm_offset_t error_offset = 0;
@@ -303,29 +302,26 @@ retry_lookup:
return(result);
}
-
/*
* If successful, destroys the map copy object.
*/
-kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
- lock_value)
- vm_object_t object;
- vm_offset_t offset;
- pointer_t data;
- unsigned int data_cnt;
- vm_prot_t lock_value;
+kern_return_t memory_object_data_provided(
+ vm_object_t object,
+ vm_offset_t offset,
+ pointer_t data,
+ unsigned int data_cnt,
+ vm_prot_t lock_value)
{
return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
data_cnt, lock_value, FALSE, IP_NULL,
0);
}
-
-kern_return_t memory_object_data_error(object, offset, size, error_value)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
- kern_return_t error_value;
+kern_return_t memory_object_data_error(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ kern_return_t error_value)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -361,10 +357,10 @@ kern_return_t memory_object_data_error(object, offset, size, error_value)
return(KERN_SUCCESS);
}
-kern_return_t memory_object_data_unavailable(object, offset, size)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
+kern_return_t memory_object_data_unavailable(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size)
{
#if MACH_PAGEMAP
vm_external_t existence_info = VM_EXTERNAL_NULL;
@@ -444,12 +440,11 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
-memory_object_lock_result_t memory_object_lock_page(m, should_return,
- should_flush, prot)
- vm_page_t m;
- memory_object_return_t should_return;
- boolean_t should_flush;
- vm_prot_t prot;
+memory_object_lock_result_t memory_object_lock_page(
+ vm_page_t m,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot)
{
/*
* Don't worry about pages for which the kernel
@@ -647,17 +642,15 @@ memory_object_lock_result_t memory_object_lock_page(m, should_return,
*/
kern_return_t
-memory_object_lock_request(object, offset, size,
- should_return, should_flush, prot,
- reply_to, reply_to_type)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
- memory_object_return_t should_return;
- boolean_t should_flush;
- vm_prot_t prot;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+memory_object_lock_request(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
vm_page_t m;
vm_offset_t original_offset = offset;
@@ -883,13 +876,12 @@ MACRO_END
}
kern_return_t
-memory_object_set_attributes_common(object, object_ready, may_cache,
- copy_strategy, use_old_pageout)
- vm_object_t object;
- boolean_t object_ready;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
- boolean_t use_old_pageout;
+memory_object_set_attributes_common(
+ vm_object_t object,
+ boolean_t object_ready,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ boolean_t use_old_pageout)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -950,13 +942,12 @@ memory_object_set_attributes_common(object, object_ready, may_cache,
* XXX stub that made change_attributes an RPC. Need investigation.
*/
-kern_return_t memory_object_change_attributes(object, may_cache,
- copy_strategy, reply_to, reply_to_type)
- vm_object_t object;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+kern_return_t memory_object_change_attributes(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
kern_return_t result;
@@ -986,33 +977,32 @@ kern_return_t memory_object_change_attributes(object, may_cache,
}
kern_return_t
-memory_object_set_attributes(object, object_ready, may_cache, copy_strategy)
- vm_object_t object;
- boolean_t object_ready;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
+memory_object_set_attributes(
+ vm_object_t object,
+ boolean_t object_ready,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
{
return memory_object_set_attributes_common(object, object_ready,
may_cache, copy_strategy,
TRUE);
}
-kern_return_t memory_object_ready(object, may_cache, copy_strategy)
- vm_object_t object;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
+kern_return_t memory_object_ready(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
{
return memory_object_set_attributes_common(object, TRUE,
may_cache, copy_strategy,
FALSE);
}
-kern_return_t memory_object_get_attributes(object, object_ready,
- may_cache, copy_strategy)
- vm_object_t object;
- boolean_t *object_ready;
- boolean_t *may_cache;
- memory_object_copy_strategy_t *copy_strategy;
+kern_return_t memory_object_get_attributes(
+ vm_object_t object,
+ boolean_t *object_ready,
+ boolean_t *may_cache,
+ memory_object_copy_strategy_t *copy_strategy)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
diff --git a/vm/vm_debug.c b/vm/vm_debug.c
index e4a4b8b..227090e 100644
--- a/vm/vm_debug.c
+++ b/vm/vm_debug.c
@@ -65,8 +65,7 @@
*/
ipc_port_t
-vm_object_real_name(object)
- vm_object_t object;
+vm_object_real_name(vm_object_t object)
{
ipc_port_t port = IP_NULL;
@@ -94,11 +93,11 @@ vm_object_real_name(object)
*/
kern_return_t
-mach_vm_region_info(map, address, regionp, portp)
- vm_map_t map;
- vm_offset_t address;
- vm_region_info_t *regionp;
- ipc_port_t *portp;
+mach_vm_region_info(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_region_info_t *regionp,
+ ipc_port_t *portp)
{
vm_map_t cmap; /* current map in traversal */
vm_map_t nmap; /* next map to look at */
@@ -184,11 +183,11 @@ mach_vm_region_info(map, address, regionp, portp)
*/
kern_return_t
-mach_vm_object_info(object, infop, shadowp, copyp)
- vm_object_t object;
- vm_object_info_t *infop;
- ipc_port_t *shadowp;
- ipc_port_t *copyp;
+mach_vm_object_info(
+ vm_object_t object,
+ vm_object_info_t *infop,
+ ipc_port_t *shadowp,
+ ipc_port_t *copyp)
{
vm_object_info_t info;
vm_object_info_state_t state;
@@ -278,10 +277,10 @@ mach_vm_object_info(object, infop, shadowp, copyp)
*/
kern_return_t
-mach_vm_object_pages(object, pagesp, countp)
- vm_object_t object;
- vm_page_info_array_t *pagesp;
- natural_t *countp;
+mach_vm_object_pages(
+ vm_object_t object,
+ vm_page_info_array_t *pagesp,
+ natural_t *countp)
{
vm_size_t size;
vm_offset_t addr;
diff --git a/vm/vm_external.c b/vm/vm_external.c
index 77bd44b..2e2593b 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -56,8 +56,7 @@ struct kmem_cache vm_object_small_existence_map_cache;
struct kmem_cache vm_object_large_existence_map_cache;
-vm_external_t vm_external_create(size)
- vm_offset_t size;
+vm_external_t vm_external_create(vm_offset_t size)
{
vm_external_t result;
vm_size_t bytes;
@@ -78,8 +77,7 @@ vm_external_t vm_external_create(size)
return(result);
}
-void vm_external_destroy(e)
- vm_external_t e;
+void vm_external_destroy(vm_external_t e)
{
if (e == VM_EXTERNAL_NULL)
return;
@@ -115,10 +113,10 @@ vm_external_state_t _vm_external_state_get(e, offset)
VM_EXTERNAL_STATE_EXISTS : VM_EXTERNAL_STATE_ABSENT );
}
-void vm_external_state_set(e, offset, state)
- vm_external_t e;
- vm_offset_t offset;
- vm_external_state_t state;
+void vm_external_state_set(
+ vm_external_t e,
+ vm_offset_t offset,
+ vm_external_state_t state)
{
unsigned
int bit, byte;
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index d9c3d7b..686156c 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -123,9 +123,9 @@ void vm_fault_init(void)
* "object" must be locked.
*/
void
-vm_fault_cleanup(object, top_page)
- vm_object_t object;
- vm_page_t top_page;
+vm_fault_cleanup(
+ vm_object_t object,
+ vm_page_t top_page)
{
vm_object_paging_end(object);
vm_object_unlock(object);
@@ -202,27 +202,23 @@ vm_fault_cleanup(object, top_page)
* The "result_page" is also left busy. It is not removed
* from the pageout queues.
*/
-vm_fault_return_t vm_fault_page(first_object, first_offset,
- fault_type, must_be_resident, interruptible,
- protection,
- result_page, top_page,
- resume, continuation)
+vm_fault_return_t vm_fault_page(
/* Arguments: */
- vm_object_t first_object; /* Object to begin search */
- vm_offset_t first_offset; /* Offset into object */
- vm_prot_t fault_type; /* What access is requested */
- boolean_t must_be_resident;/* Must page be resident? */
- boolean_t interruptible; /* May fault be interrupted? */
+ vm_object_t first_object, /* Object to begin search */
+ vm_offset_t first_offset, /* Offset into object */
+ vm_prot_t fault_type, /* What access is requested */
+ boolean_t must_be_resident,/* Must page be resident? */
+ boolean_t interruptible, /* May fault be interrupted? */
/* Modifies in place: */
- vm_prot_t *protection; /* Protection for mapping */
+ vm_prot_t *protection, /* Protection for mapping */
/* Returns: */
- vm_page_t *result_page; /* Page found, if successful */
- vm_page_t *top_page; /* Page in top object, if
+ vm_page_t *result_page, /* Page found, if successful */
+ vm_page_t *top_page, /* Page in top object, if
* not result_page.
*/
/* More arguments: */
- boolean_t resume; /* We are restarting. */
- void (*continuation)(); /* Continuation for blocking. */
+ boolean_t resume, /* We are restarting. */
+ void (*continuation)()) /* Continuation for blocking. */
{
vm_page_t m;
vm_object_t object;
@@ -1149,14 +1145,13 @@ vm_fault_continue(void)
/*NOTREACHED*/
}
-kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
- resume, continuation)
- vm_map_t map;
- vm_offset_t vaddr;
- vm_prot_t fault_type;
- boolean_t change_wiring;
- boolean_t resume;
- void (*continuation)();
+kern_return_t vm_fault(
+ vm_map_t map,
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+ boolean_t change_wiring,
+ boolean_t resume,
+ void (*continuation)())
{
 vm_map_version_t version; /* Map version for verification */
boolean_t wired; /* Should mapping be wired down? */
@@ -1500,9 +1495,9 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
*
* Wire down a range of virtual addresses in a map.
*/
-void vm_fault_wire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_wire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
vm_offset_t va;
@@ -1536,9 +1531,9 @@ void vm_fault_wire(map, entry)
*
* Unwire a range of virtual addresses in a map.
*/
-void vm_fault_unwire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_unwire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
vm_offset_t va;
pmap_t pmap;
@@ -1625,10 +1620,10 @@ void vm_fault_unwire(map, entry)
* other than the common case will return KERN_FAILURE, and the caller
* is expected to call vm_fault().
*/
-kern_return_t vm_fault_wire_fast(map, va, entry)
- vm_map_t map;
- vm_offset_t va;
- vm_map_entry_t entry;
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry)
{
vm_object_t object;
vm_offset_t offset;
@@ -1774,9 +1769,9 @@ kern_return_t vm_fault_wire_fast(map, va, entry)
* Release a page used by vm_fault_copy.
*/
-void vm_fault_copy_cleanup(page, top_page)
- vm_page_t page;
- vm_page_t top_page;
+void vm_fault_copy_cleanup(
+ vm_page_t page,
+ vm_page_t top_page)
{
vm_object_t object = page->object;
@@ -1817,23 +1812,14 @@ void vm_fault_copy_cleanup(page, top_page)
* requested.
*/
kern_return_t vm_fault_copy(
- src_object,
- src_offset,
- src_size,
- dst_object,
- dst_offset,
- dst_map,
- dst_version,
- interruptible
- )
- vm_object_t src_object;
- vm_offset_t src_offset;
- vm_size_t *src_size; /* INOUT */
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_map_t dst_map;
- vm_map_version_t *dst_version;
- boolean_t interruptible;
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t *src_size, /* INOUT */
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_map_t dst_map,
+ vm_map_version_t *dst_version,
+ boolean_t interruptible)
{
vm_page_t result_page;
vm_prot_t prot;
@@ -2014,10 +2000,10 @@ kern_return_t vm_fault_copy(
* XXX Untested. Also unused. Eventually, this technology
* could be used in vm_fault_copy() to advantage.
*/
-vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_page_t *result_page; /* OUT */
+vm_fault_return_t vm_fault_page_overwrite(
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_page_t *result_page) /* OUT */
{
vm_page_t dst_page;
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index 3eaf0a8..b997cb5 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -80,15 +80,14 @@ vm_map_t kernel_pageable_map;
*/
kern_return_t
-projected_buffer_allocate(map, size, persistence, kernel_p,
- user_p, protection, inheritance)
- vm_map_t map;
- vm_size_t size;
- int persistence;
- vm_offset_t *kernel_p;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_allocate(
+ vm_map_t map,
+ vm_size_t size,
+ int persistence,
+ vm_offset_t *kernel_p,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_object_t object;
vm_map_entry_t u_entry, k_entry;
@@ -178,13 +177,13 @@ projected_buffer_allocate(map, size, persistence, kernel_p,
*/
kern_return_t
-projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
- vm_map_t map;
- vm_offset_t kernel_addr;
- vm_size_t size;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_map(
+ vm_map_t map,
+ vm_offset_t kernel_addr,
+ vm_size_t size,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_map_entry_t u_entry, k_entry;
vm_offset_t physical_addr, user_addr;
@@ -251,9 +250,10 @@ projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
*/
kern_return_t
-projected_buffer_deallocate(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry, k_entry;
@@ -303,8 +303,7 @@ projected_buffer_deallocate(map, start, end)
*/
kern_return_t
-projected_buffer_collect(map)
- vm_map_t map;
+projected_buffer_collect(vm_map_t map)
{
vm_map_entry_t entry, next;
@@ -330,9 +329,10 @@ projected_buffer_collect(map)
*/
boolean_t
-projected_buffer_in_range(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_in_range(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
@@ -359,10 +359,10 @@ projected_buffer_in_range(map, start, end)
*/
kern_return_t
-kmem_alloc(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_object_t object;
vm_map_entry_t entry;
@@ -440,12 +440,12 @@ retry:
* If successful, the pages in the old region are mapped twice.
* The old region is unchanged. Use kmem_free to get rid of it.
*/
-kern_return_t kmem_realloc(map, oldaddr, oldsize, newaddrp, newsize)
- vm_map_t map;
- vm_offset_t oldaddr;
- vm_size_t oldsize;
- vm_offset_t *newaddrp;
- vm_size_t newsize;
+kern_return_t kmem_realloc(
+ vm_map_t map,
+ vm_offset_t oldaddr,
+ vm_size_t oldsize,
+ vm_offset_t *newaddrp,
+ vm_size_t newsize)
{
vm_offset_t oldmin, oldmax;
vm_offset_t newaddr;
@@ -541,10 +541,10 @@ retry:
*/
kern_return_t
-kmem_alloc_wired(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_wired(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
@@ -624,10 +624,10 @@ retry:
*/
kern_return_t
-kmem_alloc_aligned(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_aligned(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
@@ -709,10 +709,10 @@ retry:
*/
kern_return_t
-kmem_alloc_pageable(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_offset_t addr;
kern_return_t kr;
@@ -740,10 +740,10 @@ kmem_alloc_pageable(map, addrp, size)
*/
void
-kmem_free(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_free(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
kern_return_t kr;
@@ -758,11 +758,12 @@ kmem_free(map, addr, size)
* a submap.
*/
void
-kmem_alloc_pages(object, offset, start, end, protection)
- vm_object_t object;
- vm_offset_t offset;
- vm_offset_t start, end;
- vm_prot_t protection;
+kmem_alloc_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -813,11 +814,12 @@ kmem_alloc_pages(object, offset, start, end, protection)
* a submap.
*/
void
-kmem_remap_pages(object, offset, start, end, protection)
- vm_object_t object;
- vm_offset_t offset;
- vm_offset_t start, end;
- vm_prot_t protection;
+kmem_remap_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -871,11 +873,13 @@ kmem_remap_pages(object, offset, start, end, protection)
*/
void
-kmem_submap(map, parent, min, max, size, pageable)
- vm_map_t map, parent;
- vm_offset_t *min, *max;
- vm_size_t size;
- boolean_t pageable;
+kmem_submap(
+ vm_map_t map,
+ vm_map_t parent,
+ vm_offset_t *min,
+ vm_offset_t *max,
+ vm_size_t size,
+ boolean_t pageable)
{
vm_offset_t addr;
kern_return_t kr;
@@ -913,9 +917,9 @@ kmem_submap(map, parent, min, max, size, pageable)
* Initialize the kernel's virtual memory map, taking
* into account all memory allocated up to this time.
*/
-void kmem_init(start, end)
- vm_offset_t start;
- vm_offset_t end;
+void kmem_init(
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
FALSE);
@@ -951,13 +955,13 @@ void kmem_init(start, end)
*/
kern_return_t
-kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
- vm_map_t map;
- vm_offset_t *addr; /* actual addr of data */
- vm_offset_t *alloc_addr; /* page aligned addr */
- vm_size_t *alloc_size; /* size allocated */
- vm_map_copy_t copy;
- vm_size_t min_size; /* Do at least this much */
+kmem_io_map_copyout(
+ vm_map_t map,
+ vm_offset_t *addr, /* actual addr of data */
+ vm_offset_t *alloc_addr, /* page aligned addr */
+ vm_size_t *alloc_size, /* size allocated */
+ vm_map_copy_t copy,
+ vm_size_t min_size) /* Do at least this much */
{
vm_offset_t myaddr, offset;
vm_size_t mysize, copy_size;
@@ -1055,10 +1059,10 @@ kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
*/
void
-kmem_io_map_deallocate(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_io_map_deallocate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
/*
* Remove the mappings. The pmap_remove is needed.
@@ -1077,10 +1081,11 @@ kmem_io_map_deallocate(map, addr, size)
* and the kernel map/submaps.
*/
-int copyinmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyinmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
@@ -1103,10 +1108,11 @@ int copyinmap(map, fromaddr, toaddr, length)
* and the kernel map/submaps.
*/
-int copyoutmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyoutmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
diff --git a/vm/vm_map.c b/vm/vm_map.c
index e6eabdb..6b13724 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -194,11 +194,12 @@ void vm_map_init(void)
*/
}
-void vm_map_setup(map, pmap, min, max, pageable)
- vm_map_t map;
- pmap_t pmap;
- vm_offset_t min, max;
- boolean_t pageable;
+void vm_map_setup(
+ vm_map_t map,
+ pmap_t pmap,
+ vm_offset_t min,
+ vm_offset_t max,
+ boolean_t pageable)
{
vm_map_first_entry(map) = vm_map_to_entry(map);
vm_map_last_entry(map) = vm_map_to_entry(map);
@@ -227,10 +228,11 @@ void vm_map_setup(map, pmap, min, max, pageable)
* the given physical map structure, and having
* the given lower and upper address bounds.
*/
-vm_map_t vm_map_create(pmap, min, max, pageable)
- pmap_t pmap;
- vm_offset_t min, max;
- boolean_t pageable;
+vm_map_t vm_map_create(
+ pmap_t pmap,
+ vm_offset_t min,
+ vm_offset_t max,
+ boolean_t pageable)
{
vm_map_t result;
@@ -370,8 +372,7 @@ static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
* Creates another valid reference to the given map.
*
*/
-void vm_map_reference(map)
- vm_map_t map;
+void vm_map_reference(vm_map_t map)
{
if (map == VM_MAP_NULL)
return;
@@ -388,8 +389,7 @@ void vm_map_reference(map)
* destroying it if no references remain.
* The map should not be locked.
*/
-void vm_map_deallocate(map)
- vm_map_t map;
+void vm_map_deallocate(vm_map_t map)
{
int c;
@@ -433,10 +433,10 @@ void vm_map_deallocate(map)
* result indicates whether the address is
* actually contained in the map.
*/
-boolean_t vm_map_lookup_entry(map, address, entry)
- vm_map_t map;
- vm_offset_t address;
- vm_map_entry_t *entry; /* OUT */
+boolean_t vm_map_lookup_entry(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_map_entry_t *entry) /* OUT */
{
struct rbtree_node *node;
vm_map_entry_t hint;
@@ -490,10 +490,11 @@ boolean_t vm_map_lookup_entry(map, address, entry)
*/
boolean_t
-invalid_user_access(map, start, end, prot)
- vm_map_t map;
- vm_offset_t start, end;
- vm_prot_t prot;
+invalid_user_access(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t prot)
{
vm_map_entry_t entry;
@@ -517,13 +518,13 @@ invalid_user_access(map, start, end, prot)
* are initialized to zero. If an object is supplied,
* then an existing entry may be extended.
*/
-kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
- vm_map_t map;
- vm_offset_t *address; /* OUT */
- vm_size_t size;
- vm_offset_t mask;
- vm_object_t object;
- vm_map_entry_t *o_entry; /* OUT */
+kern_return_t vm_map_find_entry(
+ vm_map_t map,
+ vm_offset_t *address, /* OUT */
+ vm_size_t size,
+ vm_offset_t mask,
+ vm_object_t object,
+ vm_map_entry_t *o_entry) /* OUT */
{
vm_map_entry_t entry, new_entry;
vm_offset_t start;
@@ -689,13 +690,13 @@ boolean_t vm_map_pmap_enter_enable = FALSE;
* The source map should not be locked on entry.
*/
void
-vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
- vm_map_t map;
- vm_offset_t addr;
- vm_offset_t end_addr;
- vm_object_t object;
- vm_offset_t offset;
- vm_prot_t protection;
+vm_map_pmap_enter(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_offset_t end_addr,
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_prot_t protection)
{
while (addr < end_addr) {
vm_page_t m;
@@ -747,21 +748,17 @@ vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
* Arguments are as defined in the vm_map call.
*/
kern_return_t vm_map_enter(
- map,
- address, size, mask, anywhere,
- object, offset, needs_copy,
- cur_protection, max_protection, inheritance)
- vm_map_t map;
- vm_offset_t *address; /* IN/OUT */
- vm_size_t size;
- vm_offset_t mask;
- boolean_t anywhere;
- vm_object_t object;
- vm_offset_t offset;
- boolean_t needs_copy;
- vm_prot_t cur_protection;
- vm_prot_t max_protection;
- vm_inherit_t inheritance;
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ vm_object_t object,
+ vm_offset_t offset,
+ boolean_t needs_copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
vm_map_entry_t entry;
vm_offset_t start;
@@ -1047,10 +1044,10 @@ kern_return_t vm_map_enter(
* This routine is called only when it is known that
* the entry must be split.
*/
-void _vm_map_clip_start(map_header, entry, start)
- struct vm_map_header *map_header;
- vm_map_entry_t entry;
- vm_offset_t start;
+void _vm_map_clip_start(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t start)
{
vm_map_entry_t new_entry;
@@ -1100,10 +1097,10 @@ void _vm_map_clip_start(map_header, entry, start)
* This routine is called only when it is known that
* the entry must be split.
*/
-void _vm_map_clip_end(map_header, entry, end)
- struct vm_map_header *map_header;
- vm_map_entry_t entry;
- vm_offset_t end;
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end)
{
vm_map_entry_t new_entry;
@@ -1160,11 +1157,11 @@ void _vm_map_clip_end(map_header, entry, end)
* range from the superior map, and then destroy the
* submap (if desired). [Better yet, don't try it.]
*/
-kern_return_t vm_map_submap(map, start, end, submap)
- vm_map_t map;
- vm_offset_t start;
- vm_offset_t end;
- vm_map_t submap;
+kern_return_t vm_map_submap(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_map_t submap)
{
vm_map_entry_t entry;
kern_return_t result = KERN_INVALID_ARGUMENT;
@@ -1208,12 +1205,12 @@ kern_return_t vm_map_submap(map, start, end, submap)
* specified, the maximum protection is to be set;
* otherwise, only the current protection is affected.
*/
-kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
- vm_map_t map;
- vm_offset_t start;
- vm_offset_t end;
- vm_prot_t new_prot;
- boolean_t set_max;
+kern_return_t vm_map_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t new_prot,
+ boolean_t set_max)
{
vm_map_entry_t current;
vm_map_entry_t entry;
@@ -1296,11 +1293,11 @@ kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
* affects how the map will be shared with
* child maps at the time of vm_map_fork.
*/
-kern_return_t vm_map_inherit(map, start, end, new_inheritance)
- vm_map_t map;
- vm_offset_t start;
- vm_offset_t end;
- vm_inherit_t new_inheritance;
+kern_return_t vm_map_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_inherit_t new_inheritance)
{
vm_map_entry_t entry;
vm_map_entry_t temp_entry;
@@ -1345,12 +1342,12 @@ kern_return_t vm_map_inherit(map, start, end, new_inheritance)
* Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable,
* or vm_map_pageable_user); don't call vm_map_pageable directly.
*/
-kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
- vm_map_t map;
- vm_offset_t start;
- vm_offset_t end;
- vm_prot_t access_type;
- boolean_t user_wire;
+kern_return_t vm_map_pageable_common(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t access_type,
+ boolean_t user_wire)
{
vm_map_entry_t entry;
vm_map_entry_t start_entry;
@@ -1594,9 +1591,9 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
*
* Deallocate the given entry from the target map.
*/
-void vm_map_entry_delete(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_map_entry_delete(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
vm_offset_t s, e;
vm_object_t object;
@@ -1678,10 +1675,10 @@ void vm_map_entry_delete(map, entry)
* map.
*/
-kern_return_t vm_map_delete(map, start, end)
- vm_map_t map;
- vm_offset_t start;
- vm_offset_t end;
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
vm_map_entry_t first_entry;
@@ -1761,10 +1758,10 @@ kern_return_t vm_map_delete(map, start, end)
* Remove the given address range from the target map.
* This is the exported form of vm_map_delete.
*/
-kern_return_t vm_map_remove(map, start, end)
- vm_map_t map;
- vm_offset_t start;
- vm_offset_t end;
+kern_return_t vm_map_remove(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
kern_return_t result;
@@ -1784,8 +1781,7 @@ kern_return_t vm_map_remove(map, start, end)
* that have not already been stolen.
*/
void
-vm_map_copy_steal_pages(copy)
-vm_map_copy_t copy;
+vm_map_copy_steal_pages(vm_map_copy_t copy)
{
vm_page_t m, new_m;
int i;
@@ -1831,8 +1827,7 @@ vm_map_copy_t copy;
* stolen, they are freed. If the pages are not stolen, they
* are unbusied, and associated state is cleaned up.
*/
-void vm_map_copy_page_discard(copy)
-vm_map_copy_t copy;
+void vm_map_copy_page_discard(vm_map_copy_t copy)
{
while (copy->cpy_npages > 0) {
vm_page_t m;
@@ -1877,8 +1872,7 @@ vm_map_copy_t copy;
* vm_map_copyin).
*/
void
-vm_map_copy_discard(copy)
- vm_map_copy_t copy;
+vm_map_copy_discard(vm_map_copy_t copy)
{
free_next_copy:
if (copy == VM_MAP_COPY_NULL)
@@ -1954,8 +1948,7 @@ free_next_copy:
* deallocation will not fail.
*/
vm_map_copy_t
-vm_map_copy_copy(copy)
- vm_map_copy_t copy;
+vm_map_copy_copy(vm_map_copy_t copy)
{
vm_map_copy_t new_copy;
@@ -2001,9 +1994,9 @@ vm_map_copy_copy(copy)
* A version of vm_map_copy_discard that can be called
* as a continuation from a vm_map_copy page list.
*/
-kern_return_t vm_map_copy_discard_cont(cont_args, copy_result)
-vm_map_copyin_args_t cont_args;
-vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copy_discard_cont(
+vm_map_copyin_args_t cont_args,
+vm_map_copy_t *copy_result) /* OUT */
{
vm_map_copy_discard((vm_map_copy_t) cont_args);
if (copy_result != (vm_map_copy_t *)0)
@@ -2058,11 +2051,11 @@ vm_map_copy_t *copy_result; /* OUT */
* atomically and interruptibly, an error indication is
* returned.
*/
-kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
- vm_map_t dst_map;
- vm_offset_t dst_addr;
- vm_map_copy_t copy;
- boolean_t interruptible;
+kern_return_t vm_map_copy_overwrite(
+ vm_map_t dst_map,
+ vm_offset_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible)
{
vm_size_t size;
vm_offset_t start;
@@ -2435,10 +2428,10 @@ start_pass_1:
* If successful, consumes the copy object.
* Otherwise, the caller is responsible for it.
*/
-kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
- vm_map_t dst_map;
- vm_offset_t *dst_addr; /* OUT */
- vm_map_copy_t copy;
+kern_return_t vm_map_copyout(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
{
vm_size_t size;
vm_size_t adjustment;
@@ -2689,10 +2682,10 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* Version of vm_map_copyout() for page list vm map copies.
*
*/
-kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy)
- vm_map_t dst_map;
- vm_offset_t *dst_addr; /* OUT */
- vm_map_copy_t copy;
+kern_return_t vm_map_copyout_page_list(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
{
vm_size_t size;
vm_offset_t start;
@@ -3076,12 +3069,12 @@ error:
* In/out conditions:
* The source map should not be locked on entry.
*/
-kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
- vm_map_t src_map;
- vm_offset_t src_addr;
- vm_size_t len;
- boolean_t src_destroy;
- vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ vm_map_copy_t *copy_result) /* OUT */
{
vm_map_entry_t tmp_entry; /* Result of last map lookup --
* in multi-level lookup, this
@@ -3438,11 +3431,11 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
* Our caller donates an object reference.
*/
-kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
- vm_object_t object;
- vm_offset_t offset; /* offset of region in object */
- vm_size_t size; /* size of region in object */
- vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin_object(
+ vm_object_t object,
+ vm_offset_t offset, /* offset of region in object */
+ vm_size_t size, /* size of region in object */
+ vm_map_copy_t *copy_result) /* OUT */
{
vm_map_copy_t copy; /* Resulting copy */
@@ -3483,9 +3476,9 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
* the scheduler.
*/
-kern_return_t vm_map_copyin_page_list_cont(cont_args, copy_result)
-vm_map_copyin_args_t cont_args;
-vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin_page_list_cont(
+ vm_map_copyin_args_t cont_args,
+ vm_map_copy_t *copy_result) /* OUT */
{
kern_return_t result = 0; /* '=0' to quiet gcc warnings */
boolean_t do_abort, src_destroy, src_destroy_only;
@@ -3539,15 +3532,14 @@ vm_map_copy_t *copy_result; /* OUT */
* the recipient of this copy_result must be prepared to deal with it.
*/
-kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
- steal_pages, copy_result, is_cont)
- vm_map_t src_map;
- vm_offset_t src_addr;
- vm_size_t len;
- boolean_t src_destroy;
- boolean_t steal_pages;
- vm_map_copy_t *copy_result; /* OUT */
- boolean_t is_cont;
+kern_return_t vm_map_copyin_page_list(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ boolean_t steal_pages,
+ vm_map_copy_t *copy_result, /* OUT */
+ boolean_t is_cont)
{
vm_map_entry_t src_entry;
vm_page_t m;
@@ -4067,8 +4059,7 @@ error:
*
* The source map must not be locked.
*/
-vm_map_t vm_map_fork(old_map)
- vm_map_t old_map;
+vm_map_t vm_map_fork(vm_map_t old_map)
{
vm_map_t new_map;
vm_map_entry_t old_entry;
@@ -4338,17 +4329,16 @@ vm_map_t vm_map_fork(old_map)
* copying operations, although the data referenced will
* remain the same.
*/
-kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
- object, offset, out_prot, wired)
- vm_map_t *var_map; /* IN/OUT */
- vm_offset_t vaddr;
- vm_prot_t fault_type;
-
- vm_map_version_t *out_version; /* OUT */
- vm_object_t *object; /* OUT */
- vm_offset_t *offset; /* OUT */
- vm_prot_t *out_prot; /* OUT */
- boolean_t *wired; /* OUT */
+kern_return_t vm_map_lookup(
+ vm_map_t *var_map, /* IN/OUT */
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+
+ vm_map_version_t *out_version, /* OUT */
+ vm_object_t *object, /* OUT */
+ vm_offset_t *offset, /* OUT */
+ vm_prot_t *out_prot, /* OUT */
+ boolean_t *wired) /* OUT */
{
vm_map_entry_t entry;
vm_map_t map = *var_map;
@@ -4520,9 +4510,9 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
* since the given version. If successful, the map
* will not change until vm_map_verify_done() is called.
*/
-boolean_t vm_map_verify(map, version)
- vm_map_t map;
- vm_map_version_t *version; /* REF */
+boolean_t vm_map_verify(
+ vm_map_t map,
+ vm_map_version_t *version) /* REF */
{
boolean_t result;
@@ -4551,19 +4541,16 @@ boolean_t vm_map_verify(map, version)
* a task's address map.
*/
-kern_return_t vm_region(map, address, size,
- protection, max_protection,
- inheritance, is_shared,
- object_name, offset_in_object)
- vm_map_t map;
- vm_offset_t *address; /* IN/OUT */
- vm_size_t *size; /* OUT */
- vm_prot_t *protection; /* OUT */
- vm_prot_t *max_protection; /* OUT */
- vm_inherit_t *inheritance; /* OUT */
- boolean_t *is_shared; /* OUT */
- ipc_port_t *object_name; /* OUT */
- vm_offset_t *offset_in_object; /* OUT */
+kern_return_t vm_region(
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t *size, /* OUT */
+ vm_prot_t *protection, /* OUT */
+ vm_prot_t *max_protection, /* OUT */
+ vm_inherit_t *inheritance, /* OUT */
+ boolean_t *is_shared, /* OUT */
+ ipc_port_t *object_name, /* OUT */
+ vm_offset_t *offset_in_object) /* OUT */
{
vm_map_entry_t tmp_entry;
vm_map_entry_t entry;
@@ -4623,9 +4610,9 @@ kern_return_t vm_region(map, address, size,
* at allocation time because the adjacent entry
* is often wired down.
*/
-void vm_map_simplify(map, start)
- vm_map_t map;
- vm_offset_t start;
+void vm_map_simplify(
+ vm_map_t map,
+ vm_offset_t start)
{
vm_map_entry_t this_entry;
vm_map_entry_t prev_entry;
@@ -4684,12 +4671,12 @@ void vm_map_simplify(map, start)
* it itself. [This assumes that attributes do not
* need to be inherited, which seems ok to me]
*/
-kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
- vm_map_t map;
- vm_offset_t address;
- vm_size_t size;
- vm_machine_attribute_t attribute;
- vm_machine_attribute_val_t* value; /* IN/OUT */
+kern_return_t vm_map_machine_attribute(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
{
kern_return_t ret;
@@ -4714,8 +4701,7 @@ kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
/*
* vm_map_print: [ debug ]
*/
-void vm_map_print(map)
- vm_map_t map;
+void vm_map_print(vm_map_t map)
{
vm_map_entry_t entry;
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index f4f1fef..7ac43d6 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -226,12 +226,12 @@ unsigned int vm_pageout_inactive_cleaned_external = 0;
* not busy on exit.
*/
vm_page_t
-vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
- vm_page_t m;
- vm_offset_t paging_offset;
- vm_object_t new_object;
- vm_offset_t new_offset;
- boolean_t flush;
+vm_pageout_setup(
+ vm_page_t m,
+ vm_offset_t paging_offset,
+ vm_object_t new_object,
+ vm_offset_t new_offset,
+ boolean_t flush)
{
vm_object_t old_object = m->object;
vm_page_t holding_page = 0; /*'=0'to quiet gcc warnings*/
@@ -413,10 +413,10 @@ vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
* copy to a new page in a new object, if not.
*/
void
-vm_pageout_page(m, initial, flush)
- vm_page_t m;
- boolean_t initial;
- boolean_t flush;
+vm_pageout_page(
+ vm_page_t m,
+ boolean_t initial,
+ boolean_t flush)
{
vm_map_copy_t copy;
vm_object_t old_object;
diff --git a/vm/vm_user.c b/vm/vm_user.c
index 9ba5e1c..f7c87cc 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -56,11 +56,11 @@ vm_statistics_data_t vm_stat;
 * vm_allocate allocates "zero fill" memory in the specified
* map.
*/
-kern_return_t vm_allocate(map, addr, size, anywhere)
- vm_map_t map;
- vm_offset_t *addr;
- vm_size_t size;
- boolean_t anywhere;
+kern_return_t vm_allocate(
+ vm_map_t map,
+ vm_offset_t *addr,
+ vm_size_t size,
+ boolean_t anywhere)
{
kern_return_t result;
@@ -97,10 +97,10 @@ kern_return_t vm_allocate(map, addr, size, anywhere)
* vm_deallocate deallocates the specified range of addresses in the
* specified address map.
*/
-kern_return_t vm_deallocate(map, start, size)
- vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
+kern_return_t vm_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -115,11 +115,11 @@ kern_return_t vm_deallocate(map, start, size)
* vm_inherit sets the inheritance of the specified range in the
* specified map.
*/
-kern_return_t vm_inherit(map, start, size, new_inheritance)
- vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- vm_inherit_t new_inheritance;
+kern_return_t vm_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_inherit_t new_inheritance)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -149,12 +149,12 @@ kern_return_t vm_inherit(map, start, size, new_inheritance)
* specified map.
*/
-kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
- vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- boolean_t set_maximum;
- vm_prot_t new_protection;
+kern_return_t vm_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ boolean_t set_maximum,
+ vm_prot_t new_protection)
{
if ((map == VM_MAP_NULL) ||
(new_protection & ~(VM_PROT_ALL|VM_PROT_NOTIFY)))
@@ -172,9 +172,9 @@ kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
set_maximum));
}
-kern_return_t vm_statistics(map, stat)
- vm_map_t map;
- vm_statistics_data_t *stat;
+kern_return_t vm_statistics(
+ vm_map_t map,
+ vm_statistics_data_t *stat)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -217,12 +217,12 @@ kern_return_t vm_cache_statistics(
* Handle machine-specific attributes for a mapping, such
* as cachability, migrability, etc.
*/
-kern_return_t vm_machine_attribute(map, address, size, attribute, value)
- vm_map_t map;
- vm_address_t address;
- vm_size_t size;
- vm_machine_attribute_t attribute;
- vm_machine_attribute_val_t* value; /* IN/OUT */
+kern_return_t vm_machine_attribute(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -235,12 +235,12 @@ kern_return_t vm_machine_attribute(map, address, size, attribute, value)
return vm_map_machine_attribute(map, address, size, attribute, value);
}
-kern_return_t vm_read(map, address, size, data, data_size)
- vm_map_t map;
- vm_address_t address;
- vm_size_t size;
- pointer_t *data;
- vm_size_t *data_size;
+kern_return_t vm_read(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ pointer_t *data,
+ vm_size_t *data_size)
{
kern_return_t error;
vm_map_copy_t ipc_address;
@@ -259,11 +259,11 @@ kern_return_t vm_read(map, address, size, data, data_size)
return(error);
}
-kern_return_t vm_write(map, address, data, size)
- vm_map_t map;
- vm_address_t address;
- pointer_t data;
- vm_size_t size;
+kern_return_t vm_write(
+ vm_map_t map,
+ vm_address_t address,
+ pointer_t data,
+ vm_size_t size)
{
if (map == VM_MAP_NULL)
return KERN_INVALID_ARGUMENT;
@@ -272,11 +272,11 @@ kern_return_t vm_write(map, address, data, size)
FALSE /* interruptible XXX */);
}
-kern_return_t vm_copy(map, source_address, size, dest_address)
- vm_map_t map;
- vm_address_t source_address;
- vm_size_t size;
- vm_address_t dest_address;
+kern_return_t vm_copy(
+ vm_map_t map,
+ vm_address_t source_address,
+ vm_size_t size,
+ vm_address_t dest_address)
{
vm_map_copy_t copy;
kern_return_t kr;
@@ -304,22 +304,17 @@ kern_return_t vm_copy(map, source_address, size, dest_address)
* Routine: vm_map
*/
kern_return_t vm_map(
- target_map,
- address, size, mask, anywhere,
- memory_object, offset,
- copy,
- cur_protection, max_protection, inheritance)
- vm_map_t target_map;
- vm_offset_t *address;
- vm_size_t size;
- vm_offset_t mask;
- boolean_t anywhere;
- ipc_port_t memory_object;
- vm_offset_t offset;
- boolean_t copy;
- vm_prot_t cur_protection;
- vm_prot_t max_protection;
- vm_inherit_t inheritance;
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ ipc_port_t memory_object,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
vm_object_t object;
kern_return_t result;