From fe0231ba14d8597b0d78bf6121dd15a82bbab34a Mon Sep 17 00:00:00 2001
From: Marin Ramesa
Date: Fri, 4 Apr 2014 22:32:14 +0200
Subject: Convert from K&R to ANSI

Convert from K&R style function definitions to ANSI style function
definitions.

* ddb/db_access.c: Convert function definitions from K&R to ANSI.
* ddb/db_aout.c: Likewise.
* ddb/db_break.c: Likewise.
* ddb/db_command.c: Likewise.
* ddb/db_cond.c: Likewise.
* ddb/db_examine.c: Likewise.
* ddb/db_expr.c: Likewise.
* ddb/db_ext_symtab.c: Likewise.
* ddb/db_input.c: Likewise.
* ddb/db_lex.c: Likewise.
* ddb/db_macro.c: Likewise.
* ddb/db_mp.c: Likewise.
* ddb/db_output.c: Likewise.
* ddb/db_print.c: Likewise.
* ddb/db_run.c: Likewise.
* ddb/db_sym.c: Likewise.
* ddb/db_task_thread.c: Likewise.
* ddb/db_trap.c: Likewise.
* ddb/db_variables.c: Likewise.
* ddb/db_watch.c: Likewise.
* device/blkio.c: Likewise.
* device/chario.c: Likewise.
* device/dev_lookup.c: Likewise.
* device/dev_name.c: Likewise.
* device/dev_pager.c: Likewise.
* device/ds_routines.c: Likewise.
* device/net_io.c: Likewise.
* device/subrs.c: Likewise.
* i386/i386/db_interface.c: Likewise.
* i386/i386/fpu.c: Likewise.
* i386/i386/io_map.c: Likewise.
* i386/i386/loose_ends.c: Likewise.
* i386/i386/mp_desc.c: Likewise.
* i386/i386/pcb.c: Likewise.
* i386/i386/phys.c: Likewise.
* i386/i386/trap.c: Likewise.
* i386/i386/user_ldt.c: Likewise.
* i386/i386at/com.c: Likewise.
* i386/i386at/kd.c: Likewise.
* i386/i386at/kd_event.c: Likewise.
* i386/i386at/kd_mouse.c: Likewise.
* i386/i386at/kd_queue.c: Likewise.
* i386/i386at/lpr.c: Likewise.
* i386/i386at/model_dep.c: Likewise.
* i386/i386at/rtc.c: Likewise.
* i386/intel/pmap.c: Likewise.
* i386/intel/read_fault.c: Likewise.
* ipc/ipc_entry.c: Likewise.
* ipc/ipc_hash.c: Likewise.
* ipc/ipc_kmsg.c: Likewise.
* ipc/ipc_marequest.c: Likewise.
* ipc/ipc_mqueue.c: Likewise.
* ipc/ipc_notify.c: Likewise.
* ipc/ipc_port.c: Likewise.
* ipc/ipc_right.c: Likewise.
* ipc/mach_debug.c: Likewise.
* ipc/mach_msg.c: Likewise.
* ipc/mach_port.c: Likewise.
* ipc/mach_rpc.c: Likewise.
* kern/act.c: Likewise.
* kern/exception.c: Likewise.
* kern/ipc_mig.c: Likewise.
* kern/ipc_tt.c: Likewise.
* kern/lock_mon.c: Likewise.
* kern/mach_clock.c: Likewise.
* kern/machine.c: Likewise.
* kern/printf.c: Likewise.
* kern/priority.c: Likewise.
* kern/startup.c: Likewise.
* kern/syscall_emulation.c: Likewise.
* kern/syscall_subr.c: Likewise.
* kern/thread_swap.c: Likewise.
* kern/time_stamp.c: Likewise.
* kern/timer.c: Likewise.
* kern/xpr.c: Likewise.
* vm/memory_object.c: Likewise.
* vm/vm_debug.c: Likewise.
* vm/vm_external.c: Likewise.
* vm/vm_fault.c: Likewise.
* vm/vm_kern.c: Likewise.
* vm/vm_map.c: Likewise.
* vm/vm_pageout.c: Likewise.
* vm/vm_user.c: Likewise.
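For readers less familiar with the older style, the shape of the change is
the following (a schematic example with a made-up function, not code taken
from this commit): a K&R definition lists only the parameter names in the
parentheses and declares their types separately, while an ANSI (C89)
definition carries the types in the parameter list itself.

    /* K&R style: parameter types declared between the ')' and the '{'. */
    int
    example_fn(count, name)
    	int	count;
    	char	*name;
    {
    	return count + (name != 0);
    }

    /* Equivalent ANSI style: the definition also acts as a prototype. */
    int
    example_fn(int count, char *name)
    {
    	return count + (name != 0);
    }

Besides readability, the ANSI form lets the compiler check argument types
at every call site. Such a conversion is behavior-preserving as long as no
parameter has a type narrower than its default promotion (float, short,
char), since K&R definitions receive promoted arguments; the vm_map.c
functions below take pointer- and word-sized arguments (vm_map_t,
vm_offset_t, boolean_t, and so on), so that caveat does not arise in this
diff.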
---
 vm/vm_map.c | 342 +++++++++++++++++++++++++++++-------------------------
 1 file changed, 164 insertions(+), 178 deletions(-)

(limited to 'vm/vm_map.c')

diff --git a/vm/vm_map.c b/vm/vm_map.c
index e6eabdb..6b13724 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -194,11 +194,12 @@ void vm_map_init(void)
 	 */
 }
 
-void vm_map_setup(map, pmap, min, max, pageable)
-	vm_map_t	map;
-	pmap_t		pmap;
-	vm_offset_t	min, max;
-	boolean_t	pageable;
+void vm_map_setup(
+	vm_map_t	map,
+	pmap_t		pmap,
+	vm_offset_t	min,
+	vm_offset_t	max,
+	boolean_t	pageable)
 {
 	vm_map_first_entry(map) = vm_map_to_entry(map);
 	vm_map_last_entry(map) = vm_map_to_entry(map);
@@ -227,10 +228,11 @@ void vm_map_setup(map, pmap, min, max, pageable)
  *	the given physical map structure, and having
  *	the given lower and upper address bounds.
  */
-vm_map_t vm_map_create(pmap, min, max, pageable)
-	pmap_t		pmap;
-	vm_offset_t	min, max;
-	boolean_t	pageable;
+vm_map_t vm_map_create(
+	pmap_t		pmap,
+	vm_offset_t	min,
+	vm_offset_t	max,
+	boolean_t	pageable)
 {
 	vm_map_t	result;
 
@@ -370,8 +372,7 @@ static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
  *	Creates another valid reference to the given map.
  *
  */
-void vm_map_reference(map)
-	vm_map_t	map;
+void vm_map_reference(vm_map_t map)
 {
 	if (map == VM_MAP_NULL)
 		return;
@@ -388,8 +389,7 @@ void vm_map_reference(map)
  *	destroying it if no references remain.
  *	The map should not be locked.
  */
-void vm_map_deallocate(map)
-	vm_map_t	map;
+void vm_map_deallocate(vm_map_t map)
 {
 	int	c;
 
@@ -433,10 +433,10 @@ void vm_map_deallocate(map)
  *	result indicates whether the address is
  *	actually contained in the map.
  */
-boolean_t vm_map_lookup_entry(map, address, entry)
-	vm_map_t	map;
-	vm_offset_t	address;
-	vm_map_entry_t	*entry;		/* OUT */
+boolean_t vm_map_lookup_entry(
+	vm_map_t	map,
+	vm_offset_t	address,
+	vm_map_entry_t	*entry)		/* OUT */
 {
 	struct rbtree_node	*node;
 	vm_map_entry_t		hint;
@@ -490,10 +490,11 @@ boolean_t vm_map_lookup_entry(map, address, entry)
 
  */
 boolean_t
-invalid_user_access(map, start, end, prot)
-	vm_map_t	map;
-	vm_offset_t	start, end;
-	vm_prot_t	prot;
+invalid_user_access(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_prot_t	prot)
 {
 	vm_map_entry_t	entry;
 
@@ -517,13 +518,13 @@ invalid_user_access(map, start, end, prot)
  *	are initialized to zero.  If an object is supplied,
  *	then an existing entry may be extended.
  */
-kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
-	vm_map_t	map;
-	vm_offset_t	*address;	/* OUT */
-	vm_size_t	size;
-	vm_offset_t	mask;
-	vm_object_t	object;
-	vm_map_entry_t	*o_entry;	/* OUT */
+kern_return_t vm_map_find_entry(
+	vm_map_t	map,
+	vm_offset_t	*address,	/* OUT */
+	vm_size_t	size,
+	vm_offset_t	mask,
+	vm_object_t	object,
+	vm_map_entry_t	*o_entry)	/* OUT */
 {
 	vm_map_entry_t	entry, new_entry;
 	vm_offset_t	start;
@@ -689,13 +690,13 @@ boolean_t vm_map_pmap_enter_enable = FALSE;
  *	The source map should not be locked on entry.
  */
 void
-vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
-	vm_map_t	map;
-	vm_offset_t	addr;
-	vm_offset_t	end_addr;
-	vm_object_t	object;
-	vm_offset_t	offset;
-	vm_prot_t	protection;
+vm_map_pmap_enter(
+	vm_map_t	map,
+	vm_offset_t	addr,
+	vm_offset_t	end_addr,
+	vm_object_t	object,
+	vm_offset_t	offset,
+	vm_prot_t	protection)
 {
 	while (addr < end_addr) {
 		vm_page_t	m;
@@ -747,21 +748,17 @@ vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
  *	Arguments are as defined in the vm_map call.
  */
 kern_return_t vm_map_enter(
-		map,
-		address, size, mask, anywhere,
-		object, offset, needs_copy,
-		cur_protection, max_protection, inheritance)
-	vm_map_t	map;
-	vm_offset_t	*address;	/* IN/OUT */
-	vm_size_t	size;
-	vm_offset_t	mask;
-	boolean_t	anywhere;
-	vm_object_t	object;
-	vm_offset_t	offset;
-	boolean_t	needs_copy;
-	vm_prot_t	cur_protection;
-	vm_prot_t	max_protection;
-	vm_inherit_t	inheritance;
+	vm_map_t	map,
+	vm_offset_t	*address,	/* IN/OUT */
+	vm_size_t	size,
+	vm_offset_t	mask,
+	boolean_t	anywhere,
+	vm_object_t	object,
+	vm_offset_t	offset,
+	boolean_t	needs_copy,
+	vm_prot_t	cur_protection,
+	vm_prot_t	max_protection,
+	vm_inherit_t	inheritance)
 {
 	vm_map_entry_t	entry;
 	vm_offset_t	start;
@@ -1047,10 +1044,10 @@ kern_return_t vm_map_enter(
  *	This routine is called only when it is known that
  *	the entry must be split.
  */
-void _vm_map_clip_start(map_header, entry, start)
-	struct vm_map_header *map_header;
-	vm_map_entry_t	entry;
-	vm_offset_t	start;
+void _vm_map_clip_start(
+	struct vm_map_header	*map_header,
+	vm_map_entry_t		entry,
+	vm_offset_t		start)
 {
 	vm_map_entry_t	new_entry;
 
@@ -1100,10 +1097,10 @@ void _vm_map_clip_start(map_header, entry, start)
  *	This routine is called only when it is known that
  *	the entry must be split.
  */
-void _vm_map_clip_end(map_header, entry, end)
-	struct vm_map_header *map_header;
-	vm_map_entry_t	entry;
-	vm_offset_t	end;
+void _vm_map_clip_end(
+	struct vm_map_header	*map_header,
+	vm_map_entry_t		entry,
+	vm_offset_t		end)
 {
 	vm_map_entry_t	new_entry;
 
@@ -1160,11 +1157,11 @@ void _vm_map_clip_end(map_header, entry, end)
  *	range from the superior map, and then destroy the
  *	submap (if desired).  [Better yet, don't try it.]
  */
-kern_return_t vm_map_submap(map, start, end, submap)
-	vm_map_t	map;
-	vm_offset_t	start;
-	vm_offset_t	end;
-	vm_map_t	submap;
+kern_return_t vm_map_submap(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_map_t	submap)
 {
 	vm_map_entry_t	entry;
 	kern_return_t	result = KERN_INVALID_ARGUMENT;
@@ -1208,12 +1205,12 @@ kern_return_t vm_map_submap(map, start, end, submap)
  *	specified, the maximum protection is to be set;
  *	otherwise, only the current protection is affected.
  */
-kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
-	vm_map_t	map;
-	vm_offset_t	start;
-	vm_offset_t	end;
-	vm_prot_t	new_prot;
-	boolean_t	set_max;
+kern_return_t vm_map_protect(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_prot_t	new_prot,
+	boolean_t	set_max)
 {
 	vm_map_entry_t	current;
 	vm_map_entry_t	entry;
@@ -1296,11 +1293,11 @@ kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
  *	affects how the map will be shared with
  *	child maps at the time of vm_map_fork.
  */
-kern_return_t vm_map_inherit(map, start, end, new_inheritance)
-	vm_map_t	map;
-	vm_offset_t	start;
-	vm_offset_t	end;
-	vm_inherit_t	new_inheritance;
+kern_return_t vm_map_inherit(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_inherit_t	new_inheritance)
 {
 	vm_map_entry_t	entry;
 	vm_map_entry_t	temp_entry;
@@ -1345,12 +1342,12 @@ kern_return_t vm_map_inherit(map, start, end, new_inheritance)
  *	Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable,
  *	or vm_map_pageable_user); don't call vm_map_pageable directly.
  */
-kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
-	vm_map_t	map;
-	vm_offset_t	start;
-	vm_offset_t	end;
-	vm_prot_t	access_type;
-	boolean_t	user_wire;
+kern_return_t vm_map_pageable_common(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end,
+	vm_prot_t	access_type,
+	boolean_t	user_wire)
 {
 	vm_map_entry_t	entry;
 	vm_map_entry_t	start_entry;
@@ -1594,9 +1591,9 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
  *
  *	Deallocate the given entry from the target map.
  */
-void vm_map_entry_delete(map, entry)
-	vm_map_t	map;
-	vm_map_entry_t	entry;
+void vm_map_entry_delete(
+	vm_map_t	map,
+	vm_map_entry_t	entry)
 {
 	vm_offset_t	s, e;
 	vm_object_t	object;
@@ -1678,10 +1675,10 @@ void vm_map_entry_delete(map, entry)
  *	map.
  */
 
-kern_return_t vm_map_delete(map, start, end)
-	vm_map_t	map;
-	vm_offset_t	start;
-	vm_offset_t	end;
+kern_return_t vm_map_delete(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end)
 {
 	vm_map_entry_t	entry;
 	vm_map_entry_t	first_entry;
@@ -1761,10 +1758,10 @@ kern_return_t vm_map_delete(map, start, end)
  *	Remove the given address range from the target map.
  *	This is the exported form of vm_map_delete.
  */
-kern_return_t vm_map_remove(map, start, end)
-	vm_map_t	map;
-	vm_offset_t	start;
-	vm_offset_t	end;
+kern_return_t vm_map_remove(
+	vm_map_t	map,
+	vm_offset_t	start,
+	vm_offset_t	end)
 {
 	kern_return_t	result;
 
@@ -1784,8 +1781,7 @@ kern_return_t vm_map_remove(map, start, end)
  *	that have not already been stolen.
  */
 void
-vm_map_copy_steal_pages(copy)
-vm_map_copy_t	copy;
+vm_map_copy_steal_pages(vm_map_copy_t copy)
 {
 	vm_page_t	m, new_m;
 	int		i;
@@ -1831,8 +1827,7 @@ vm_map_copy_t copy;
  *	stolen, they are freed.  If the pages are not stolen, they
  *	are unbusied, and associated state is cleaned up.
  */
-void vm_map_copy_page_discard(copy)
-vm_map_copy_t	copy;
+void vm_map_copy_page_discard(vm_map_copy_t copy)
 {
 	while (copy->cpy_npages > 0) {
 		vm_page_t	m;
@@ -1877,8 +1872,7 @@ vm_map_copy_t copy;
  *	vm_map_copyin).
  */
 void
-vm_map_copy_discard(copy)
-	vm_map_copy_t	copy;
+vm_map_copy_discard(vm_map_copy_t copy)
 {
 free_next_copy:
 	if (copy == VM_MAP_COPY_NULL)
@@ -1954,8 +1948,7 @@ free_next_copy:
  *	deallocation will not fail.
  */
 vm_map_copy_t
-vm_map_copy_copy(copy)
-	vm_map_copy_t	copy;
+vm_map_copy_copy(vm_map_copy_t copy)
 {
 	vm_map_copy_t	new_copy;
 
@@ -2001,9 +1994,9 @@ vm_map_copy_copy(copy)
  *	A version of vm_map_copy_discard that can be called
  *	as a continuation from a vm_map_copy page list.
  */
-kern_return_t	vm_map_copy_discard_cont(cont_args, copy_result)
-vm_map_copyin_args_t	cont_args;
-vm_map_copy_t		*copy_result;	/* OUT */
+kern_return_t	vm_map_copy_discard_cont(
+vm_map_copyin_args_t	cont_args,
+vm_map_copy_t		*copy_result)	/* OUT */
 {
 	vm_map_copy_discard((vm_map_copy_t) cont_args);
 	if (copy_result != (vm_map_copy_t *)0)
@@ -2058,11 +2051,11 @@ vm_map_copy_t *copy_result; /* OUT */
  *	atomically and interruptibly, an error indication is
  *	returned.
  */
-kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
-	vm_map_t	dst_map;
-	vm_offset_t	dst_addr;
-	vm_map_copy_t	copy;
-	boolean_t	interruptible;
+kern_return_t vm_map_copy_overwrite(
+	vm_map_t	dst_map,
+	vm_offset_t	dst_addr,
+	vm_map_copy_t	copy,
+	boolean_t	interruptible)
 {
 	vm_size_t	size;
 	vm_offset_t	start;
@@ -2435,10 +2428,10 @@ start_pass_1:
  *	If successful, consumes the copy object.
  *	Otherwise, the caller is responsible for it.
  */
-kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
-	vm_map_t	dst_map;
-	vm_offset_t	*dst_addr;	/* OUT */
-	vm_map_copy_t	copy;
+kern_return_t vm_map_copyout(
+	vm_map_t	dst_map,
+	vm_offset_t	*dst_addr,	/* OUT */
+	vm_map_copy_t	copy)
 {
 	vm_size_t	size;
 	vm_size_t	adjustment;
@@ -2689,10 +2682,10 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
  *	Version of vm_map_copyout() for page list vm map copies.
  *
  */
-kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy)
-	vm_map_t	dst_map;
-	vm_offset_t	*dst_addr;	/* OUT */
-	vm_map_copy_t	copy;
+kern_return_t vm_map_copyout_page_list(
+	vm_map_t	dst_map,
+	vm_offset_t	*dst_addr,	/* OUT */
+	vm_map_copy_t	copy)
 {
 	vm_size_t	size;
 	vm_offset_t	start;
@@ -3076,12 +3069,12 @@ error:
  * In/out conditions:
  *	The source map should not be locked on entry.
  */
-kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
-	vm_map_t	src_map;
-	vm_offset_t	src_addr;
-	vm_size_t	len;
-	boolean_t	src_destroy;
-	vm_map_copy_t	*copy_result;	/* OUT */
+kern_return_t vm_map_copyin(
+	vm_map_t	src_map,
+	vm_offset_t	src_addr,
+	vm_size_t	len,
+	boolean_t	src_destroy,
+	vm_map_copy_t	*copy_result)	/* OUT */
 {
 	vm_map_entry_t	tmp_entry;	/* Result of last map lookup --
 					 * in multi-level lookup, this
@@ -3438,11 +3431,11 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
  *	Our caller donates an object reference.
  */
 
-kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
-	vm_object_t	object;
-	vm_offset_t	offset;		/* offset of region in object */
-	vm_size_t	size;		/* size of region in object */
-	vm_map_copy_t	*copy_result;	/* OUT */
+kern_return_t vm_map_copyin_object(
+	vm_object_t	object,
+	vm_offset_t	offset,		/* offset of region in object */
+	vm_size_t	size,		/* size of region in object */
+	vm_map_copy_t	*copy_result)	/* OUT */
 {
 	vm_map_copy_t	copy;		/* Resulting copy */
 
@@ -3483,9 +3476,9 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
  *	the scheduler.
  */
 
-kern_return_t	vm_map_copyin_page_list_cont(cont_args, copy_result)
-vm_map_copyin_args_t	cont_args;
-vm_map_copy_t		*copy_result;	/* OUT */
+kern_return_t	vm_map_copyin_page_list_cont(
+	vm_map_copyin_args_t	cont_args,
+	vm_map_copy_t		*copy_result)	/* OUT */
 {
 	kern_return_t	result = 0; /* '=0' to quiet gcc warnings */
 	boolean_t	do_abort, src_destroy, src_destroy_only;
@@ -3539,15 +3532,14 @@ vm_map_copy_t *copy_result; /* OUT */
  *	the recipient of this copy_result must be prepared to deal with it.
  */
 
-kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
-				      steal_pages, copy_result, is_cont)
-	vm_map_t	src_map;
-	vm_offset_t	src_addr;
-	vm_size_t	len;
-	boolean_t	src_destroy;
-	boolean_t	steal_pages;
-	vm_map_copy_t	*copy_result;	/* OUT */
-	boolean_t	is_cont;
+kern_return_t vm_map_copyin_page_list(
+	vm_map_t	src_map,
+	vm_offset_t	src_addr,
+	vm_size_t	len,
+	boolean_t	src_destroy,
+	boolean_t	steal_pages,
+	vm_map_copy_t	*copy_result,	/* OUT */
+	boolean_t	is_cont)
 {
 	vm_map_entry_t	src_entry;
 	vm_page_t	m;
@@ -4067,8 +4059,7 @@ error:
  *
  *	The source map must not be locked.
  */
-vm_map_t vm_map_fork(old_map)
-	vm_map_t	old_map;
+vm_map_t vm_map_fork(vm_map_t old_map)
 {
 	vm_map_t	new_map;
 	vm_map_entry_t	old_entry;
@@ -4338,17 +4329,16 @@ vm_map_t vm_map_fork(old_map)
  *	copying operations, although the data referenced will
  *	remain the same.
  */
-kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
-			    object, offset, out_prot, wired)
-	vm_map_t	*var_map;	/* IN/OUT */
-	vm_offset_t	vaddr;
-	vm_prot_t	fault_type;
-
-	vm_map_version_t	*out_version;	/* OUT */
-	vm_object_t		*object;	/* OUT */
-	vm_offset_t		*offset;	/* OUT */
-	vm_prot_t		*out_prot;	/* OUT */
-	boolean_t		*wired;		/* OUT */
+kern_return_t vm_map_lookup(
+	vm_map_t	*var_map,	/* IN/OUT */
+	vm_offset_t	vaddr,
+	vm_prot_t	fault_type,
+
+	vm_map_version_t	*out_version,	/* OUT */
+	vm_object_t		*object,	/* OUT */
+	vm_offset_t		*offset,	/* OUT */
+	vm_prot_t		*out_prot,	/* OUT */
+	boolean_t		*wired)		/* OUT */
 {
 	vm_map_entry_t		entry;
 	vm_map_t		map = *var_map;
@@ -4520,9 +4510,9 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
  *	since the given version.  If successful, the map
  *	will not change until vm_map_verify_done() is called.
  */
-boolean_t vm_map_verify(map, version)
-	vm_map_t	map;
-	vm_map_version_t *version;	/* REF */
+boolean_t vm_map_verify(
+	vm_map_t	map,
+	vm_map_version_t *version)	/* REF */
 {
 	boolean_t	result;
 
@@ -4551,19 +4541,16 @@ boolean_t vm_map_verify(map, version)
  *	a task's address map.
  */
 
-kern_return_t	vm_region(map, address, size,
-			 protection, max_protection,
-			 inheritance, is_shared,
-			 object_name, offset_in_object)
-	vm_map_t	map;
-	vm_offset_t	*address;		/* IN/OUT */
-	vm_size_t	*size;			/* OUT */
-	vm_prot_t	*protection;		/* OUT */
-	vm_prot_t	*max_protection;	/* OUT */
-	vm_inherit_t	*inheritance;		/* OUT */
-	boolean_t	*is_shared;		/* OUT */
-	ipc_port_t	*object_name;		/* OUT */
-	vm_offset_t	*offset_in_object;	/* OUT */
+kern_return_t	vm_region(
+	vm_map_t	map,
+	vm_offset_t	*address,		/* IN/OUT */
+	vm_size_t	*size,			/* OUT */
+	vm_prot_t	*protection,		/* OUT */
+	vm_prot_t	*max_protection,	/* OUT */
+	vm_inherit_t	*inheritance,		/* OUT */
+	boolean_t	*is_shared,		/* OUT */
+	ipc_port_t	*object_name,		/* OUT */
+	vm_offset_t	*offset_in_object)	/* OUT */
 {
 	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
@@ -4623,9 +4610,9 @@ kern_return_t vm_region(map, address, size,
  *	at allocation time because the adjacent entry
  *	is often wired down.
  */
-void vm_map_simplify(map, start)
-	vm_map_t	map;
-	vm_offset_t	start;
+void vm_map_simplify(
+	vm_map_t	map,
+	vm_offset_t	start)
 {
 	vm_map_entry_t	this_entry;
 	vm_map_entry_t	prev_entry;
@@ -4684,12 +4671,12 @@ void vm_map_simplify(map, start)
  *	it itself.  [This assumes that attributes do not
  *	need to be inherited, which seems ok to me]
  */
-kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
-	vm_map_t	map;
-	vm_offset_t	address;
-	vm_size_t	size;
-	vm_machine_attribute_t	attribute;
-	vm_machine_attribute_val_t* value;	/* IN/OUT */
+kern_return_t vm_map_machine_attribute(
+	vm_map_t	map,
+	vm_offset_t	address,
+	vm_size_t	size,
+	vm_machine_attribute_t	attribute,
+	vm_machine_attribute_val_t* value)	/* IN/OUT */
 {
 	kern_return_t	ret;
 
@@ -4714,8 +4701,7 @@ kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
 /*
  *	vm_map_print:	[ debug ]
  */
-void vm_map_print(map)
-	vm_map_t	map;
+void vm_map_print(vm_map_t map)
 {
 	vm_map_entry_t	entry;
 
--
cgit v1.2.3