Diffstat (limited to 'vm/vm_kern.c')
-rw-r--r--	vm/vm_kern.c	180
1 file changed, 93 insertions(+), 87 deletions(-)
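
Every hunk below applies the same mechanical transformation: old-style (K&R) function definitions, where parameter types are declared between the parameter list and the opening brace, are rewritten as ANSI prototype-style definitions with the types given inside the parameter list. A minimal stand-alone sketch of the pattern follows; the function names and bodies are illustrative only, not taken from vm_kern.c, and it builds with any compiler in C17 mode or earlier (C23 drops K&R definitions).

	#include <stdio.h>

	/* Illustrative example only; these functions are not from vm_kern.c. */

	/* K&R (old-style) definition, as removed by this change:
	 * parameter types follow the parameter list, and calls are
	 * not checked against them by the compiler. */
	long
	krc_sum(a, b)
		long	a;
		long	b;
	{
		return a + b;
	}

	/* ANSI prototype-style definition, as introduced: the types
	 * appear in the parameter list itself, so argument types and
	 * counts are checked at every call site. */
	long
	ansi_sum(
		long	a,
		long	b)
	{
		return a + b;
	}

	int
	main(void)
	{
		/* Both forms behave identically at run time. */
		printf("%ld %ld\n", krc_sum(2, 3), ansi_sum(2, 3));
		return 0;
	}

For parameter types like vm_map_t, vm_offset_t, and vm_size_t the conversion should not change generated code; its benefit is that mismatched arguments at call sites become compile-time diagnostics.
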
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index 3eaf0a8..b997cb5 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -80,15 +80,14 @@ vm_map_t kernel_pageable_map;
*/
kern_return_t
-projected_buffer_allocate(map, size, persistence, kernel_p,
- user_p, protection, inheritance)
- vm_map_t map;
- vm_size_t size;
- int persistence;
- vm_offset_t *kernel_p;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_allocate(
+ vm_map_t map,
+ vm_size_t size,
+ int persistence,
+ vm_offset_t *kernel_p,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_object_t object;
vm_map_entry_t u_entry, k_entry;
@@ -178,13 +177,13 @@ projected_buffer_allocate(map, size, persistence, kernel_p,
*/
kern_return_t
-projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
- vm_map_t map;
- vm_offset_t kernel_addr;
- vm_size_t size;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_map(
+ vm_map_t map,
+ vm_offset_t kernel_addr,
+ vm_size_t size,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_map_entry_t u_entry, k_entry;
vm_offset_t physical_addr, user_addr;
@@ -251,9 +250,10 @@ projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
*/
kern_return_t
-projected_buffer_deallocate(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry, k_entry;
@@ -303,8 +303,7 @@ projected_buffer_deallocate(map, start, end)
*/
kern_return_t
-projected_buffer_collect(map)
- vm_map_t map;
+projected_buffer_collect(vm_map_t map)
{
vm_map_entry_t entry, next;
@@ -330,9 +329,10 @@ projected_buffer_collect(map)
*/
boolean_t
-projected_buffer_in_range(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_in_range(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
@@ -359,10 +359,10 @@ projected_buffer_in_range(map, start, end)
*/
kern_return_t
-kmem_alloc(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_object_t object;
vm_map_entry_t entry;
@@ -440,12 +440,12 @@ retry:
* If successful, the pages in the old region are mapped twice.
* The old region is unchanged. Use kmem_free to get rid of it.
*/
-kern_return_t kmem_realloc(map, oldaddr, oldsize, newaddrp, newsize)
- vm_map_t map;
- vm_offset_t oldaddr;
- vm_size_t oldsize;
- vm_offset_t *newaddrp;
- vm_size_t newsize;
+kern_return_t kmem_realloc(
+ vm_map_t map,
+ vm_offset_t oldaddr,
+ vm_size_t oldsize,
+ vm_offset_t *newaddrp,
+ vm_size_t newsize)
{
vm_offset_t oldmin, oldmax;
vm_offset_t newaddr;
@@ -541,10 +541,10 @@ retry:
*/
kern_return_t
-kmem_alloc_wired(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_wired(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
@@ -624,10 +624,10 @@ retry:
*/
kern_return_t
-kmem_alloc_aligned(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_aligned(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
@@ -709,10 +709,10 @@ retry:
*/
kern_return_t
-kmem_alloc_pageable(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_offset_t addr;
kern_return_t kr;
@@ -740,10 +740,10 @@ kmem_alloc_pageable(map, addrp, size)
*/
void
-kmem_free(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_free(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
kern_return_t kr;
@@ -758,11 +758,12 @@ kmem_free(map, addr, size)
* a submap.
*/
void
-kmem_alloc_pages(object, offset, start, end, protection)
- vm_object_t object;
- vm_offset_t offset;
- vm_offset_t start, end;
- vm_prot_t protection;
+kmem_alloc_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -813,11 +814,12 @@ kmem_alloc_pages(object, offset, start, end, protection)
* a submap.
*/
void
-kmem_remap_pages(object, offset, start, end, protection)
- vm_object_t object;
- vm_offset_t offset;
- vm_offset_t start, end;
- vm_prot_t protection;
+kmem_remap_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -871,11 +873,13 @@ kmem_remap_pages(object, offset, start, end, protection)
*/
void
-kmem_submap(map, parent, min, max, size, pageable)
- vm_map_t map, parent;
- vm_offset_t *min, *max;
- vm_size_t size;
- boolean_t pageable;
+kmem_submap(
+ vm_map_t map,
+ vm_map_t parent,
+ vm_offset_t *min,
+ vm_offset_t *max,
+ vm_size_t size,
+ boolean_t pageable)
{
vm_offset_t addr;
kern_return_t kr;
@@ -913,9 +917,9 @@ kmem_submap(map, parent, min, max, size, pageable)
* Initialize the kernel's virtual memory map, taking
* into account all memory allocated up to this time.
*/
-void kmem_init(start, end)
- vm_offset_t start;
- vm_offset_t end;
+void kmem_init(
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
FALSE);
@@ -951,13 +955,13 @@ void kmem_init(start, end)
*/
kern_return_t
-kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
- vm_map_t map;
- vm_offset_t *addr; /* actual addr of data */
- vm_offset_t *alloc_addr; /* page aligned addr */
- vm_size_t *alloc_size; /* size allocated */
- vm_map_copy_t copy;
- vm_size_t min_size; /* Do at least this much */
+kmem_io_map_copyout(
+ vm_map_t map,
+ vm_offset_t *addr, /* actual addr of data */
+ vm_offset_t *alloc_addr, /* page aligned addr */
+ vm_size_t *alloc_size, /* size allocated */
+ vm_map_copy_t copy,
+ vm_size_t min_size) /* Do at least this much */
{
vm_offset_t myaddr, offset;
vm_size_t mysize, copy_size;
@@ -1055,10 +1059,10 @@ kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
*/
void
-kmem_io_map_deallocate(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_io_map_deallocate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
/*
* Remove the mappings. The pmap_remove is needed.
@@ -1077,10 +1081,11 @@ kmem_io_map_deallocate(map, addr, size)
* and the kernel map/submaps.
*/
-int copyinmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyinmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
@@ -1103,10 +1108,11 @@ int copyinmap(map, fromaddr, toaddr, length)
* and the kernel map/submaps.
*/
-int copyoutmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyoutmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */