author     Roland McGrath <roland@gnu.org>   2001-04-05 06:39:21 +0000
committer  Roland McGrath <roland@gnu.org>   2001-04-05 06:39:21 +0000
commit     b42bd0d675ec7d1e1fbbf274fadbea984c8dab22 (patch)
tree       a7f61083f2dfa365d5d9e7101c72e8cb5b3b16a9 /vm
parent     2d43f4abdedd003eb070d633bac0e3a6fb3a746b (diff)
2001-04-04 Roland McGrath <roland@frob.com>
* ddb/db_access.h: Fix obsolescent #else/#endif syntax.
* ddb/db_aout.c: Likewise.
* ddb/db_break.c: Likewise.
* ddb/db_break.h: Likewise.
* ddb/db_command.c: Likewise.
* ddb/db_command.h: Likewise.
* ddb/db_cond.c: Likewise.
* ddb/db_examine.c: Likewise.
* ddb/db_expr.c: Likewise.
* ddb/db_ext_symtab.c: Likewise.
* ddb/db_input.c: Likewise.
* ddb/db_lex.c: Likewise.
* ddb/db_macro.c: Likewise.
* ddb/db_mp.c: Likewise.
* ddb/db_output.c: Likewise.
* ddb/db_print.c: Likewise.
* ddb/db_run.c: Likewise.
* ddb/db_sym.c: Likewise.
* ddb/db_task_thread.c: Likewise.
* ddb/db_task_thread.h: Likewise.
* ddb/db_trap.c: Likewise.
* ddb/db_variables.c: Likewise.
* ddb/db_watch.c: Likewise.
* ddb/db_watch.h: Likewise.
* ddb/db_write_cmd.c: Likewise.
* device/dev_pager.c: Likewise.
* device/device_port.h: Likewise.
* device/device_types_kernel.h: Likewise.
* device/ds_routines.h: Likewise.
* device/errno.h: Likewise.
* device/if_ether.h: Likewise.
* device/if_hdr.h: Likewise.
* device/io_req.h: Likewise.
* device/net_io.c: Likewise.
* device/net_io.h: Likewise.
* i386/i386/ast_check.c: Likewise.
* i386/i386/cswitch.S: Likewise.
* i386/i386/db_disasm.c: Likewise.
* i386/i386/db_interface.c: Likewise.
* i386/i386/db_trace.c: Likewise.
* i386/i386/debug.h: Likewise.
* i386/i386/debug_i386.c: Likewise.
* i386/i386/debug_trace.S: Likewise.
* i386/i386/eflags.h: Likewise.
* i386/i386/gdt.h: Likewise.
* i386/i386/hardclock.c: Likewise.
* i386/i386/idt-gen.h: Likewise.
* i386/i386/ipl.h: Likewise.
* i386/i386/ktss.h: Likewise.
* i386/i386/kttd_interface.c: Likewise.
* i386/i386/ldt.h: Likewise.
* i386/i386/lock.h: Likewise.
* i386/i386/locore.S: Likewise.
* i386/i386/mp_desc.h: Likewise.
* i386/i386/pic.c: Likewise.
* i386/i386/pic.h: Likewise.
* i386/i386/pio.h: Likewise.
* i386/i386/pit.h: Likewise.
* i386/i386/seg.h: Likewise.
* i386/i386/thread.h: Likewise.
* i386/i386/trap.c: Likewise.
* i386/i386/trap.h: Likewise.
* i386/i386/vm_param.h: Likewise.
* i386/i386/vm_tuning.h: Likewise.
* i386/i386at/autoconf.c: Likewise.
* i386/i386at/blit.c: Likewise.
* i386/i386at/conf.c: Likewise.
* i386/i386at/fd.c: Likewise.
* i386/i386at/idt.h: Likewise.
* i386/i386at/immc.c: Likewise.
* i386/i386at/kd.c: Likewise.
* i386/i386at/kd_event.c: Likewise.
* i386/i386at/kd_mouse.c: Likewise.
* i386/i386at/model_dep.c: Likewise.
* i386/i386at/rtc.c: Likewise.
* i386/include/mach/i386/asm.h: Likewise.
* i386/include/mach/i386/eflags.h: Likewise.
* i386/include/mach/i386/mach_i386.defs: Likewise.
* i386/include/mach/i386/multiboot.h: Likewise.
* i386/include/mach/i386/trap.h: Likewise.
* i386/include/mach/i386/vm_types.h: Likewise.
* i386/include/mach/sa/stdarg.h: Likewise.
* i386/intel/pmap.c: Likewise.
* i386/intel/pmap.h: Likewise.
* include/alloca.h: Likewise.
* include/device/device_types.defs: Likewise.
* include/device/device_types.h: Likewise.
* include/device/disk_status.h: Likewise.
* include/device/net_status.h: Likewise.
* include/mach/mach.defs: Likewise.
* include/mach/memory_object.defs: Likewise.
* include/mach/std_types.defs: Likewise.
* include/mach_debug/hash_info.h: Likewise.
* include/mach_debug/ipc_info.h: Likewise.
* include/mach_debug/mach_debug.defs: Likewise.
* include/mach_debug/mach_debug_types.defs: Likewise.
* include/mach_debug/mach_debug_types.h: Likewise.
* include/mach_debug/vm_info.h: Likewise.
* include/mach_debug/zone_info.h: Likewise.
* include/sys/ioctl.h: Likewise.
* include/sys/time.h: Likewise.
* ipc/ipc_entry.h: Likewise.
* ipc/ipc_hash.h: Likewise.
* ipc/ipc_init.c: Likewise.
* ipc/ipc_kmsg.c: Likewise.
* ipc/ipc_kmsg.h: Likewise.
* ipc/ipc_marequest.c: Likewise.
* ipc/ipc_marequest.h: Likewise.
* ipc/ipc_mqueue.c: Likewise.
* ipc/ipc_mqueue.h: Likewise.
* ipc/ipc_notify.c: Likewise.
* ipc/ipc_notify.h: Likewise.
* ipc/ipc_object.c: Likewise.
* ipc/ipc_object.h: Likewise.
* ipc/ipc_port.c: Likewise.
* ipc/ipc_port.h: Likewise.
* ipc/ipc_pset.c: Likewise.
* ipc/ipc_pset.h: Likewise.
* ipc/ipc_right.c: Likewise.
* ipc/ipc_right.h: Likewise.
* ipc/ipc_space.c: Likewise.
* ipc/ipc_space.h: Likewise.
* ipc/mach_debug.c: Likewise.
* ipc/mach_msg.c: Likewise.
* ipc/mach_msg.h: Likewise.
* ipc/mach_port.c: Likewise.
* kern/act.c: Likewise.
* kern/assert.h: Likewise.
* kern/ast.c: Likewise.
* kern/ast.h: Likewise.
* kern/bootstrap.c: Likewise.
* kern/counters.c: Likewise.
* kern/counters.h: Likewise.
* kern/debug.h: Likewise.
* kern/exception.c: Likewise.
* kern/host.h: Likewise.
* kern/ipc_host.c: Likewise.
* kern/ipc_host.h: Likewise.
* kern/ipc_kobject.c: Likewise.
* kern/ipc_mig.c: Likewise.
* kern/ipc_tt.c: Likewise.
* kern/ipc_tt.h: Likewise.
* kern/kalloc.h: Likewise.
* kern/lock_mon.c: Likewise.
* kern/mach_clock.c: Likewise.
* kern/mach_factor.c: Likewise.
* kern/mach_param.h: Likewise.
* kern/machine.c: Likewise.
* kern/processor.c: Likewise.
* kern/profile.c: Likewise.
* kern/queue.h: Likewise.
* kern/sched.h: Likewise.
* kern/startup.c: Likewise.
* kern/syscall_emulation.h: Likewise.
* kern/syscall_subr.c: Likewise.
* kern/syscall_subr.h: Likewise.
* kern/syscall_sw.c: Likewise.
* kern/syscall_sw.h: Likewise.
* kern/task.h: Likewise.
* kern/thread_swap.h: Likewise.
* kern/time_out.h: Likewise.
* kern/time_stamp.c: Likewise.
* kern/time_stamp.h: Likewise.
* kern/timer.c: Likewise.
* kern/timer.h: Likewise.
* kern/xpr.c: Likewise.
* kern/xpr.h: Likewise.
* kern/zalloc.c: Likewise.
* kern/zalloc.h: Likewise.
* linux/dev/drivers/block/ide.c: Likewise.
* linux/dev/include/linux/blk.h: Likewise.
* linux/src/include/linux/cdrom.h: Likewise.
* linux/src/include/linux/md.h: Likewise.
* util/cpu.h: Likewise.
* vm/memory_object.c: Likewise.
* vm/memory_object.h: Likewise.
* vm/pmap.h: Likewise.
* vm/vm_debug.c: Likewise.
* vm/vm_external.h: Likewise.
* vm/vm_fault.c: Likewise.
* vm/vm_fault.h: Likewise.
* vm/vm_kern.h: Likewise.
* vm/vm_map.c: Likewise.
* vm/vm_map.h: Likewise.
* vm/vm_object.h: Likewise.
* vm/vm_pageout.c: Likewise.
* vm/vm_pageout.h: Likewise.
* vm/vm_user.h: Likewise.
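The "obsolescent #else/#endif syntax" being fixed is the pre-ISO habit of repeating the controlling macro as a bare token after the directive. ISO C allows only a comment there, and compilers typically flag the stray token as extra tokens after the directive (in GCC, e.g., via the -Wendif-labels warning). A minimal sketch of the pattern, not taken from this tree; NORMA_VM is used only as a representative conditional:

    /* Old, pre-ISO form: a bare macro name trails the directive.
     * Compilers warn about the extra tokens after #else/#endif. */
    #if NORMA_VM
    extern int use_norma_path;
    #else NORMA_VM
    extern int use_local_path;
    #endif NORMA_VM

    /* ISO C form adopted throughout this commit: the label is kept,
     * but wrapped in a comment so the directive ends cleanly. */
    #if NORMA_VM
    extern int use_norma_path;
    #else /* NORMA_VM */
    extern int use_local_path;
    #endif /* NORMA_VM */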
Diffstat (limited to 'vm')
-rw-r--r--  vm/memory_object.c  |  26
-rw-r--r--  vm/memory_object.h  |  16
-rw-r--r--  vm/pmap.h           |  28
-rw-r--r--  vm/vm_debug.c       |  14
-rw-r--r--  vm/vm_external.h    |  14
-rw-r--r--  vm/vm_fault.c       |  34
-rw-r--r--  vm/vm_fault.h       |  14
-rw-r--r--  vm/vm_kern.h        |  14
-rw-r--r--  vm/vm_map.c         | 120
-rw-r--r--  vm/vm_map.h         |   8
-rw-r--r--  vm/vm_object.h      |  24
-rw-r--r--  vm/vm_pageout.c     |  32
-rw-r--r--  vm/vm_pageout.h     |  14
-rw-r--r--  vm/vm_user.h        |  14
14 files changed, 186 insertions, 186 deletions
diff --git a/vm/memory_object.c b/vm/memory_object.c
index a2b0bed..c01b740 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -67,11 +67,11 @@
#include <norma_ipc.h>
#if NORMA_VM
#include <norma/xmm_server_rename.h>
-#endif NORMA_VM
+#endif /* NORMA_VM */
#include <mach_pagemap.h>
#if MACH_PAGEMAP
-#include <vm/vm_external.h>
-#endif MACH_PAGEMAP
+#include <vm/vm_external.h>
+#endif /* MACH_PAGEMAP */
typedef int memory_object_lock_result_t; /* moved from below */
@@ -102,7 +102,7 @@ kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
data_cnt, lock_value, FALSE, IP_NULL,
0);
}
-#endif !NORMA_VM
+#endif /* !NORMA_VM */
kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
@@ -382,7 +382,7 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
{
#if MACH_PAGEMAP
vm_external_t existence_info = VM_EXTERNAL_NULL;
-#endif MACH_PAGEMAP
+#endif /* MACH_PAGEMAP */
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -391,11 +391,11 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
return(KERN_INVALID_ARGUMENT);
#if MACH_PAGEMAP
- if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE) &&
+ if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE) &&
(object->existence_info == VM_EXTERNAL_NULL)) {
existence_info = vm_external_create(VM_EXTERNAL_SMALL_SIZE);
}
-#endif MACH_PAGEMAP
+#endif /* MACH_PAGEMAP */
vm_object_lock(object);
#if MACH_PAGEMAP
@@ -407,7 +407,7 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
vm_object_deallocate(object);
return(KERN_SUCCESS);
}
-#endif MACH_PAGEMAP
+#endif /* MACH_PAGEMAP */
offset -= object->paging_offset;
while (size != 0) {
@@ -500,7 +500,7 @@ memory_object_lock_result_t memory_object_lock_page(m, should_return,
* Page is not dirty [2 checks] AND (
* Page is not precious OR
* No request to return precious pages ))
- *
+ *
* Now isn't that straightforward and obvious ?? ;-)
*
* XXX This doesn't handle sending a copy of a wired
@@ -898,7 +898,7 @@ MACRO_END
#if !NORMA_VM
/*
- * Old version of memory_object_lock_request.
+ * Old version of memory_object_lock_request.
*/
kern_return_t
xxx_memory_object_lock_request(object, offset, size,
@@ -924,8 +924,8 @@ xxx_memory_object_lock_request(object, offset, size,
should_return, should_flush, prot,
reply_to, reply_to_type));
}
-#endif !NORMA_VM
-
+#endif /* !NORMA_VM */
+
kern_return_t
memory_object_set_attributes_common(object, object_ready, may_cache,
copy_strategy, use_old_pageout)
@@ -1052,7 +1052,7 @@ kern_return_t memory_object_ready(object, may_cache, copy_strategy)
may_cache, copy_strategy,
FALSE);
}
-#endif !NORMA_VM
+#endif /* !NORMA_VM */
kern_return_t memory_object_get_attributes(object, object_ready,
may_cache, copy_strategy)
diff --git a/vm/memory_object.h b/vm/memory_object.h
index 9afa062..bce3f29 100644
--- a/vm/memory_object.h
+++ b/vm/memory_object.h
@@ -1,25 +1,25 @@
-/*
+/*
* Mach Operating System
* Copyright (c) 1991 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
@@ -40,4 +40,4 @@ extern void memory_manager_default_init();
extern struct ipc_port *memory_manager_default;
-#endif _VM_MEMORY_OBJECT_H_
+#endif /* _VM_MEMORY_OBJECT_H_ */
diff --git a/vm/pmap.h b/vm/pmap.h
index f9a949e..98ef5ba 100644
--- a/vm/pmap.h
+++ b/vm/pmap.h
@@ -103,7 +103,7 @@ extern void pmap_virtual_space(); /* During VM initialization,
* report virtual space
* available for the kernel.
*/
-#endif MACHINE_PAGES
+#endif /* MACHINE_PAGES */
/*
* Routines to manage the physical map data structure.
@@ -115,10 +115,10 @@ pmap_t pmap_create(vm_size_t size);
/* Return the kernel's pmap_t. */
#ifndef pmap_kernel
extern pmap_t pmap_kernel(void);
-#endif pmap_kernel
+#endif /* pmap_kernel */
/* Gain and release a reference. */
-extern void pmap_reference(pmap_t pmap);
+extern void pmap_reference(pmap_t pmap);
extern void pmap_destroy(pmap_t pmap);
/* Enter a mapping */
@@ -166,7 +166,7 @@ void pmap_clear_reference(vm_offset_t pa);
/* Return reference bit */
#ifndef pmap_is_referenced
boolean_t pmap_is_referenced(vm_offset_t pa);
-#endif pmap_is_referenced
+#endif /* pmap_is_referenced */
/* Clear modify bit */
void pmap_clear_modify(vm_offset_t pa);
@@ -182,7 +182,7 @@ extern void pmap_statistics(); /* Return statistics */
#ifndef pmap_resident_count
extern int pmap_resident_count();
-#endif pmap_resident_count
+#endif /* pmap_resident_count */
/*
* Sundry required routines
@@ -206,7 +206,7 @@ extern vm_offset_t pmap_phys_address(); /* Transform address
* to physical address
* known to this module.
*/
-#endif pmap_phys_address
+#endif /* pmap_phys_address */
#ifndef pmap_phys_to_frame
extern int pmap_phys_to_frame(); /* Inverse of
* pmap_phys_address,
@@ -215,7 +215,7 @@ extern int pmap_phys_to_frame(); /* Inverse of
* machine-independent
* pseudo-devices.
*/
-#endif pmap_phys_to_frame
+#endif /* pmap_phys_to_frame */
/*
* Optional routines
@@ -224,12 +224,12 @@ extern int pmap_phys_to_frame(); /* Inverse of
extern void pmap_copy(); /* Copy range of
* mappings, if desired.
*/
-#endif pmap_copy
+#endif /* pmap_copy */
#ifndef pmap_attribute
extern kern_return_t pmap_attribute(); /* Get/Set special
* memory attributes
*/
-#endif pmap_attribute
+#endif /* pmap_attribute */
/*
* Routines defined as macros.
@@ -239,24 +239,24 @@ extern kern_return_t pmap_attribute(); /* Get/Set special
if ((pmap) != kernel_pmap) \
PMAP_ACTIVATE(pmap, thread, cpu); \
}
-#endif PMAP_ACTIVATE_USER
+#endif /* PMAP_ACTIVATE_USER */
#ifndef PMAP_DEACTIVATE_USER
#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
if ((pmap) != kernel_pmap) \
PMAP_DEACTIVATE(pmap, thread, cpu); \
}
-#endif PMAP_DEACTIVATE_USER
+#endif /* PMAP_DEACTIVATE_USER */
#ifndef PMAP_ACTIVATE_KERNEL
#define PMAP_ACTIVATE_KERNEL(cpu) \
PMAP_ACTIVATE(kernel_pmap, THREAD_NULL, cpu)
-#endif PMAP_ACTIVATE_KERNEL
+#endif /* PMAP_ACTIVATE_KERNEL */
#ifndef PMAP_DEACTIVATE_KERNEL
#define PMAP_DEACTIVATE_KERNEL(cpu) \
PMAP_DEACTIVATE(kernel_pmap, THREAD_NULL, cpu)
-#endif PMAP_DEACTIVATE_KERNEL
+#endif /* PMAP_DEACTIVATE_KERNEL */
/*
* Exported data structures
@@ -264,4 +264,4 @@ extern kern_return_t pmap_attribute(); /* Get/Set special
extern pmap_t kernel_pmap; /* The kernel's map */
-#endif _VM_PMAP_H_
+#endif /* _VM_PMAP_H_ */
diff --git a/vm/vm_debug.c b/vm/vm_debug.c
index 17c8c31..16165ca 100644
--- a/vm/vm_debug.c
+++ b/vm/vm_debug.c
@@ -1,25 +1,25 @@
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
@@ -417,7 +417,7 @@ mach_vm_object_pages(object, pagesp, countp)
return KERN_SUCCESS;
}
-#endif MACH_VM_DEBUG
+#endif /* MACH_VM_DEBUG */
/*
* Routine: host_virtual_physical_table_info
diff --git a/vm/vm_external.h b/vm/vm_external.h
index 70ffd65..b263679 100644
--- a/vm/vm_external.h
+++ b/vm/vm_external.h
@@ -1,25 +1,25 @@
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
@@ -86,4 +86,4 @@ extern void vm_external_state_set();/* Set state of a page. */
extern vm_external_state_t _vm_external_state_get();
/* HIDDEN routine */
-#endif _VM_VM_EXTERNAL_H_
+#endif /* _VM_VM_EXTERNAL_H_ */
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index a74d41b..42108ed 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -100,7 +100,7 @@ boolean_t software_reference_bits = TRUE;
#if MACH_KDB
extern struct db_watchpoint *db_watchpoint_list;
-#endif MACH_KDB
+#endif /* MACH_KDB */
/*
* Routine: vm_fault_init
@@ -286,7 +286,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
* so that the watchpoint code notices the access.
*/
|| db_watchpoint_list
-#endif MACH_KDB
+#endif /* MACH_KDB */
) {
/*
* If we aren't asking for write permission,
@@ -481,7 +481,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_page_zero_fill(m);
vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
-
+
vm_stat.zero_fill_count++;
vm_object_lock(object);
pmap_clear_modify(m->phys_addr);
@@ -519,7 +519,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
if ((access_required & m->unlock_request) != access_required) {
vm_prot_t new_unlock_request;
kern_return_t rc;
-
+
if (!object->pager_ready) {
vm_object_assert_wait(object,
VM_OBJECT_EVENT_PAGER_READY,
@@ -581,7 +581,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
#if MACH_PAGEMAP
&& (vm_external_state_get(object->existence_info, offset + object->paging_offset) !=
VM_EXTERNAL_STATE_ABSENT)
-#endif MACH_PAGEMAP
+#endif /* MACH_PAGEMAP */
;
if ((look_for_page || (object == first_object))
@@ -664,16 +664,16 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_stat.pageins++;
vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
- if ((rc = memory_object_data_request(object->pager,
+ if ((rc = memory_object_data_request(object->pager,
object->pager_request,
- m->offset + object->paging_offset,
+ m->offset + object->paging_offset,
PAGE_SIZE, access_required)) != KERN_SUCCESS) {
if (rc != MACH_SEND_INTERRUPTED)
printf("%s(0x%x, 0x%x, 0x%x, 0x%x, 0x%x) failed, %d\n",
"memory_object_data_request",
object->pager,
object->pager_request,
- m->offset + object->paging_offset,
+ m->offset + object->paging_offset,
PAGE_SIZE, access_required, rc);
/*
* Don't want to leave a busy page around,
@@ -689,7 +689,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
VM_FAULT_INTERRUPTED :
VM_FAULT_MEMORY_ERROR);
}
-
+
/*
* Retry with same object/offset, since new data may
* be in a different page (i.e., m is meaningless at
@@ -789,7 +789,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
assert((first_m == VM_PAGE_NULL) ||
(first_m->busy && !first_m->absent &&
!first_m->active && !first_m->inactive));
-#endif EXTRA_ASSERTIONS
+#endif /* EXTRA_ASSERTIONS */
/*
* If the page is being written, but isn't
@@ -983,7 +983,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
*/
vm_page_copy(m, copy_m);
-
+
/*
* If the old page was in use by any users
* of the copy-object, it must be removed
@@ -1059,7 +1059,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
* wait result]. Can't turn off the page's
* busy bit because we're not done with it.
*/
-
+
if (m->wanted) {
m->wanted = FALSE;
thread_wakeup_with_result((event_t) m,
@@ -1749,7 +1749,7 @@ kern_return_t vm_fault_wire_fast(map, va, entry)
/*
* Wire the page down now. All bail outs beyond this
- * point must unwire the page.
+ * point must unwire the page.
*/
vm_page_lock_queues();
@@ -1774,7 +1774,7 @@ kern_return_t vm_fault_wire_fast(map, va, entry)
/*
* Put this page into the physical map.
* We have to unlock the object because pmap_enter
- * may cause other faults.
+ * may cause other faults.
*/
vm_object_unlock(object);
@@ -1865,7 +1865,7 @@ kern_return_t vm_fault_copy(
{
vm_page_t result_page;
vm_prot_t prot;
-
+
vm_page_t src_page;
vm_page_t src_top_page;
@@ -2020,7 +2020,7 @@ kern_return_t vm_fault_copy(
RETURN(KERN_SUCCESS);
#undef RETURN
- /*NOTREACHED*/
+ /*NOTREACHED*/
}
@@ -2179,4 +2179,4 @@ vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
#undef DISCARD_PAGE
}
-#endif notdef
+#endif /* notdef */
diff --git a/vm/vm_fault.h b/vm/vm_fault.h
index eee3999..5be18e5 100644
--- a/vm/vm_fault.h
+++ b/vm/vm_fault.h
@@ -1,25 +1,25 @@
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
@@ -61,4 +61,4 @@ extern void vm_fault_unwire();
extern kern_return_t vm_fault_copy(); /* Copy pages from
* one object to another
*/
-#endif _VM_VM_FAULT_H_
+#endif /* _VM_VM_FAULT_H_ */
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index 8e00fcc..3ae4641 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -1,25 +1,25 @@
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
@@ -60,4 +60,4 @@ extern vm_map_t kernel_map;
extern vm_map_t kernel_pageable_map;
extern vm_map_t ipc_kernel_map;
-#endif _VM_VM_KERN_H_
+#endif /* _VM_VM_KERN_H_ */
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 8d17a49..c060196 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -89,7 +89,7 @@ MACRO_END
* selected by the (new) use_shared_copy bit in the object. See
* vm_object_copy_temporary in vm_object.c for details. All maps
* are now "top level" maps (either task map, kernel map or submap
- * of the kernel map).
+ * of the kernel map).
*
* Since portions of maps are specified by start/end addreses,
* which may not align with existing map entries, all
@@ -644,7 +644,7 @@ int vm_map_pmap_enter_enable = FALSE;
* As soon as a page not found in the object the scan ends.
*
* Returns:
- * Nothing.
+ * Nothing.
*
* In/out conditions:
* The source map should not be locked on entry.
@@ -797,7 +797,7 @@ kern_return_t vm_map_enter(
goto StartAgain;
}
}
-
+
RETURN(KERN_NO_SPACE);
}
@@ -895,7 +895,7 @@ kern_return_t vm_map_enter(
(entry->protection == cur_protection) &&
(entry->max_protection == max_protection) &&
(entry->wired_count == 0) && /* implies user_wired_count == 0 */
- (entry->projected_on == 0)) {
+ (entry->projected_on == 0)) {
if (vm_object_coalesce(entry->object.vm_object,
VM_OBJECT_NULL,
entry->offset,
@@ -966,9 +966,9 @@ kern_return_t vm_map_enter(
if ((object != VM_OBJECT_NULL) &&
(vm_map_pmap_enter_enable) &&
(!anywhere) &&
- (!needs_copy) &&
+ (!needs_copy) &&
(size < (128*1024))) {
- vm_map_pmap_enter(map, start, end,
+ vm_map_pmap_enter(map, start, end,
object, offset, cur_protection);
}
@@ -1353,7 +1353,7 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
(entry->vme_start < end)) {
if ((entry->wired_count == 0) ||
- ((entry->vme_end < end) &&
+ ((entry->vme_end < end) &&
((entry->vme_next == vm_map_to_entry(map)) ||
(entry->vme_next->vme_start > entry->vme_end))) ||
(user_wire && (entry->user_wired_count == 0))) {
@@ -1380,7 +1380,7 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
else {
entry->wired_count--;
}
-
+
if (entry->wired_count == 0)
vm_fault_unwire(map, entry);
@@ -1442,7 +1442,7 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
if (entry->object.vm_object == VM_OBJECT_NULL) {
entry->object.vm_object =
vm_object_allocate(
- (vm_size_t)(entry->vme_end
+ (vm_size_t)(entry->vme_end
- entry->vme_start));
entry->offset = (vm_offset_t)0;
}
@@ -1464,7 +1464,7 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
* this is the end of the region.
* Protection: Access requested must be allowed.
*/
- if (((entry->vme_end < end) &&
+ if (((entry->vme_end < end) &&
((entry->vme_next == vm_map_to_entry(map)) ||
(entry->vme_next->vme_start > entry->vme_end))) ||
((entry->protection & access_type) != access_type)) {
@@ -1556,7 +1556,7 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
* vm_map_entry_delete: [ internal use only ]
*
* Deallocate the given entry from the target map.
- */
+ */
void vm_map_entry_delete(map, entry)
register vm_map_t map;
register vm_map_entry_t entry;
@@ -1717,7 +1717,7 @@ kern_return_t vm_map_delete(map, start, end)
}
return KERN_SUCCESS;
}
-#endif NORMA_IPC
+#endif /* NORMA_IPC */
vm_map_clip_start(map, entry, start);
/*
@@ -2202,7 +2202,7 @@ start_pass_1:
/*
* XXXO If there are no permanent objects in the destination,
* XXXO and the source and destination map entry zones match,
- * XXXO and the destination map entry is not shared,
+ * XXXO and the destination map entry is not shared,
* XXXO then the map entries can be deleted and replaced
* XXXO with those from the copy. The following code is the
* XXXO basic idea of what to do, but there are lots of annoying
@@ -2243,7 +2243,7 @@ start_pass_1:
vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
vm_size_t copy_size = (copy_entry->vme_end - copy_entry->vme_start);
vm_object_t object;
-
+
entry = tmp_entry;
size = (entry->vme_end - entry->vme_start);
/*
@@ -2428,7 +2428,7 @@ start_pass_1:
/*
* Macro: vm_map_copy_insert
- *
+ *
* Description:
* Link a copy chain ("copy") into a map at the
* specified location (after "where").
@@ -2811,7 +2811,7 @@ StartAgain:
(last->wired_count != 0))) {
goto create_object;
}
-
+
/*
* If this entry needs an object, make one.
*/
@@ -2913,7 +2913,7 @@ create_object:
entry->vme_start = start;
entry->vme_end = start + size;
-
+
entry->inheritance = VM_INHERIT_DEFAULT;
entry->protection = VM_PROT_DEFAULT;
entry->max_protection = VM_PROT_ALL;
@@ -2937,7 +2937,7 @@ create_object:
last = entry;
/*
- * Transfer pages into new object.
+ * Transfer pages into new object.
* Scan page list in vm_map_copy.
*/
insert_pages:
@@ -3028,7 +3028,7 @@ insert_pages:
vm_object_unlock(object);
*dst_addr = start + dst_offset;
-
+
/*
* Clear the in transition bits. This is easy if we
* didn't have a continuation.
@@ -3062,7 +3062,7 @@ error:
entry = entry->vme_next;
}
}
-
+
if (result != KERN_SUCCESS)
vm_map_delete(dst_map, start, end);
@@ -3080,7 +3080,7 @@ error:
if (result == KERN_SUCCESS) {
zfree(vm_map_copy_zone, (vm_offset_t) orig_copy);
}
-
+
return(result);
}
@@ -3151,7 +3151,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
/*
* Allocate a header element for the list.
*
- * Use the start and end in the header to
+ * Use the start and end in the header to
* remember the endpoints prior to rounding.
*/
@@ -3164,7 +3164,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
copy->offset = src_addr;
copy->size = len;
-
+
#define RETURN(x) \
MACRO_BEGIN \
vm_map_unlock(src_map); \
@@ -3345,7 +3345,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
&new_entry_needs_copy);
new_entry->needs_copy = new_entry_needs_copy;
-
+
if (result != KERN_SUCCESS) {
vm_map_copy_entry_dispose(copy, new_entry);
@@ -3427,7 +3427,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
new_entry);
-
+
/*
* Determine whether the entire region
* has been copied.
@@ -3447,7 +3447,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
/*
* If the source should be destroyed, do it now, since the
- * copy was successful.
+ * copy was successful.
*/
if (src_destroy)
(void) vm_map_delete(src_map, trunc_page(src_addr), src_end);
@@ -3498,7 +3498,7 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
* vm_map_copyin_page_list_cont:
*
* Continuation routine for vm_map_copyin_page_list.
- *
+ *
* If vm_map_copyin_page_list can't fit the entire vm range
* into a single page list object, it creates a continuation.
* When the target of the operation has used the pages in the
@@ -3527,7 +3527,7 @@ vm_map_copy_t *copy_result; /* OUT */
src_destroy_only = (cont_args->src_len == (vm_size_t) 0);
if (do_abort || src_destroy_only) {
- if (src_destroy)
+ if (src_destroy)
result = vm_map_remove(cont_args->map,
cont_args->destroy_addr,
cont_args->destroy_addr + cont_args->destroy_len);
@@ -3551,7 +3551,7 @@ vm_map_copy_t *copy_result; /* OUT */
new_args->destroy_len = cont_args->destroy_len;
}
}
-
+
vm_map_deallocate(cont_args->map);
kfree((vm_offset_t)cont_args, sizeof(vm_map_copyin_args_data_t));
@@ -3644,7 +3644,7 @@ kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
copy->size = len;
copy->cpy_cont = ((kern_return_t (*)()) 0);
copy->cpy_cont_args = (char *) VM_MAP_COPYIN_ARGS_NULL;
-
+
/*
* Find the beginning of the region.
*/
@@ -3717,7 +3717,7 @@ make_continuation:
* something stupid.
*/
- cont_args = (vm_map_copyin_args_t)
+ cont_args = (vm_map_copyin_args_t)
kalloc(sizeof(vm_map_copyin_args_data_t));
cont_args->map = src_map;
vm_map_reference(src_map);
@@ -3791,7 +3791,7 @@ make_continuation:
vm_prot_t result_prot;
vm_page_t top_page;
kern_return_t kr;
-
+
/*
* Have to fault the page in; must
* unlock the map to do so. While
@@ -3803,7 +3803,7 @@ make_continuation:
need_map_lookup = TRUE;
retry:
result_prot = VM_PROT_READ;
-
+
kr = vm_fault_page(src_object, src_offset,
VM_PROT_READ, FALSE, FALSE,
&result_prot, &m, &top_page,
@@ -3847,7 +3847,7 @@ retry:
result = KERN_MEMORY_ERROR;
goto error;
}
-
+
if (top_page != VM_PAGE_NULL) {
vm_object_lock(src_object);
VM_PAGE_FREE(top_page);
@@ -3872,13 +3872,13 @@ retry:
* we have a paging reference on it. Either
* the map is locked, or need_map_lookup is
* TRUE.
- *
+ *
* Put the page in the page list.
*/
copy->cpy_page_list[copy->cpy_npages++] = m;
vm_object_unlock(m->object);
}
-
+
/*
* DETERMINE whether the entire region
* has been copied.
@@ -3932,7 +3932,7 @@ retry:
* Remove the page from its object if it
* can be stolen. It can be stolen if:
*
- * (1) The source is being destroyed,
+ * (1) The source is being destroyed,
* the object is temporary, and
* not shared.
* (2) The page is not precious.
@@ -3950,7 +3950,7 @@ retry:
*
* Stealing wired pages requires telling the
* pmap module to let go of them.
- *
+ *
* NOTE: stealing clean pages from objects
* whose mappings survive requires a call to
* the pmap module. Maybe later.
@@ -3965,7 +3965,7 @@ retry:
(!src_object->use_shared_copy) &&
!m->precious) {
vm_offset_t page_vaddr;
-
+
page_vaddr = src_start + (i * PAGE_SIZE);
if (m->wire_count > 0) {
@@ -4063,7 +4063,7 @@ retry:
* a continuation to prevent this.
*/
if (src_destroy && !vm_map_copy_has_cont(copy)) {
- cont_args = (vm_map_copyin_args_t)
+ cont_args = (vm_map_copyin_args_t)
kalloc(sizeof(vm_map_copyin_args_data_t));
vm_map_reference(src_map);
cont_args->map = src_map;
@@ -4076,7 +4076,7 @@ retry:
copy->cpy_cont_args = (char *) cont_args;
copy->cpy_cont = vm_map_copyin_page_list_cont;
}
-
+
}
vm_map_unlock(src_map);
@@ -4165,7 +4165,7 @@ vm_map_t vm_map_fork(old_map)
&old_entry->offset,
(vm_size_t) (old_entry->vme_end -
old_entry->vme_start));
-
+
/*
* If we're making a shadow for other than
* copy on write reasons, then we have
@@ -4302,7 +4302,7 @@ vm_map_t vm_map_fork(old_map)
start,
entry_size,
FALSE,
- &copy)
+ &copy)
!= KERN_SUCCESS) {
vm_map_lock(old_map);
if (!vm_map_lookup_entry(old_map, start, &last))
@@ -4436,7 +4436,7 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
vm_map_unlock_read(old_map);
goto RetryLookup;
}
-
+
/*
* Check whether this task is allowed to have
* this page.
@@ -4444,7 +4444,7 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
prot = entry->protection;
- if ((fault_type & (prot)) != fault_type)
+ if ((fault_type & (prot)) != fault_type)
if ((prot & VM_PROT_NOTIFY) && (fault_type & VM_PROT_WRITE)) {
RETURN(KERN_WRITE_PROTECTION_FAILURE);
} else {
@@ -4489,9 +4489,9 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
&entry->object.vm_object,
&entry->offset,
(vm_size_t) (entry->vme_end - entry->vme_start));
-
+
entry->needs_copy = FALSE;
-
+
vm_map_lock_write_to_read(map);
}
else {
@@ -4542,7 +4542,7 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
out_version->main_timestamp = map->timestamp;
RETURN(KERN_SUCCESS);
-
+
#undef RETURN
}
@@ -4692,7 +4692,7 @@ void vm_map_simplify(map, start)
((prev_entry->offset + (prev_entry->vme_end - prev_entry->vme_start))
== this_entry->offset) &&
(prev_entry->projected_on == 0) &&
- (this_entry->projected_on == 0)
+ (this_entry->projected_on == 0)
) {
if (map->first_free == this_entry)
map->first_free = prev_entry;
@@ -4837,11 +4837,11 @@ void vm_map_copy_print(copy)
case VM_MAP_COPY_ENTRY_LIST:
printf("[entry_list]");
break;
-
+
case VM_MAP_COPY_OBJECT:
printf("[object]");
break;
-
+
case VM_MAP_COPY_PAGE_LIST:
printf("[page_list]");
break;
@@ -4886,7 +4886,7 @@ void vm_map_copy_print(copy)
indent -=2;
}
-#endif MACH_KDB
+#endif /* MACH_KDB */
#if NORMA_IPC
/*
@@ -4904,7 +4904,7 @@ void vm_map_copy_print(copy)
* We take responsibility for discarding the old structure and
* use a continuation to do so. Postponing this discard ensures
* that the objects containing the pages we've marked busy will stick
- * around.
+ * around.
*/
kern_return_t
vm_map_convert_to_page_list(caller_copy)
@@ -5024,10 +5024,10 @@ vm_map_convert_to_page_list(caller_copy)
vm_prot_t result_prot;
vm_page_t top_page;
kern_return_t kr;
-
+
retry:
result_prot = VM_PROT_READ;
-
+
kr = vm_fault_page(object, offset,
VM_PROT_READ, FALSE, FALSE,
&result_prot, &m, &top_page,
@@ -5104,7 +5104,7 @@ vm_map_convert_to_page_list_from_object(caller_copy)
m = vm_page_lookup(object, offset);
if ((m != VM_PAGE_NULL) && !m->busy && !m->fictitious &&
!m->absent && !m->error) {
-
+
/*
* This is the page. Mark it busy
* and keep the paging reference on
@@ -5116,10 +5116,10 @@ vm_map_convert_to_page_list_from_object(caller_copy)
vm_prot_t result_prot;
vm_page_t top_page;
kern_return_t kr;
-
+
retry:
result_prot = VM_PROT_READ;
-
+
kr = vm_fault_page(object, offset,
VM_PROT_READ, FALSE, FALSE,
&result_prot, &m, &top_page,
@@ -5136,7 +5136,7 @@ retry:
vm_object_paging_begin(object);
goto retry;
}
-
+
if (top_page != VM_PAGE_NULL) {
vm_object_lock(object);
VM_PAGE_FREE(top_page);
@@ -5241,4 +5241,4 @@ vm_map_convert_from_page_list(copy)
vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), new_entry);
return(KERN_SUCCESS);
}
-#endif NORMA_IPC
+#endif /* NORMA_IPC */
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 0bdb7d1..3e0246b 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -209,7 +209,7 @@ typedef struct vm_map_version {
* at a time.
*
* Implementation:
- * There are three formats for map copy objects.
+ * There are three formats for map copy objects.
* The first is very similar to the main
* address map in structure, and as a result, some
* of the internal maintenance functions/macros can
@@ -230,9 +230,9 @@ typedef struct vm_map_version {
#if iPSC386 || iPSC860
#define VM_MAP_COPY_PAGE_LIST_MAX 64
-#else iPSC386 || iPSC860
+#else /* iPSC386 || iPSC860 */
#define VM_MAP_COPY_PAGE_LIST_MAX 8
-#endif iPSC386 || iPSC860
+#endif /* iPSC386 || iPSC860 */
typedef struct vm_map_copy {
int type;
@@ -445,4 +445,4 @@ extern vm_object_t vm_submap_object;
#define vm_map_entry_wakeup(map) thread_wakeup((event_t)&(map)->hdr)
-#endif _VM_VM_MAP_H_
+#endif /* _VM_VM_MAP_H_ */
diff --git a/vm/vm_object.h b/vm/vm_object.h
index d3d050a..d710d99 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -1,25 +1,25 @@
-/*
+/*
* Mach Operating System
* Copyright (c) 1993-1987 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
@@ -74,7 +74,7 @@ struct vm_object {
decl_simple_lock_data(, Lock) /* Synchronization */
#if VM_OBJECT_DEBUG
thread_t LockHolder; /* Thread holding Lock */
-#endif VM_OBJECT_DEBUG
+#endif /* VM_OBJECT_DEBUG */
vm_size_t size; /* Object size (only valid
* if internal)
*/
@@ -152,7 +152,7 @@ struct vm_object {
* progress restart search?
*/
/* boolean_t */ use_old_pageout : 1,
- /* Use old pageout primitives?
+ /* Use old pageout primitives?
*/
/* boolean_t */ use_shared_copy : 1,/* Use shared (i.e.,
* delayed) copy on write */
@@ -237,10 +237,10 @@ extern void vm_object_pager_create(
extern void vm_object_destroy(
struct ipc_port *pager);
-extern void vm_object_page_map(
- vm_object_t,
- vm_offset_t,
- vm_size_t,
+extern void vm_object_page_map(
+ vm_object_t,
+ vm_offset_t,
+ vm_size_t,
vm_offset_t (*)(void *, vm_offset_t),
void *);
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index af605ac..f15e508 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -56,23 +56,23 @@
#ifndef VM_PAGEOUT_BURST_MAX
#define VM_PAGEOUT_BURST_MAX 10 /* number of pages */
-#endif VM_PAGEOUT_BURST_MAX
+#endif /* VM_PAGEOUT_BURST_MAX */
#ifndef VM_PAGEOUT_BURST_MIN
#define VM_PAGEOUT_BURST_MIN 5 /* number of pages */
-#endif VM_PAGEOUT_BURST_MIN
+#endif /* VM_PAGEOUT_BURST_MIN */
#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT 10 /* milliseconds per page */
-#endif VM_PAGEOUT_BURST_WAIT
+#endif /* VM_PAGEOUT_BURST_WAIT */
#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT 75 /* milliseconds */
-#endif VM_PAGEOUT_EMPTY_WAIT
+#endif /* VM_PAGEOUT_EMPTY_WAIT */
#ifndef VM_PAGEOUT_PAUSE_MAX
#define VM_PAGEOUT_PAUSE_MAX 10 /* number of pauses */
-#endif VM_PAGEOUT_PAUSE_MAX
+#endif /* VM_PAGEOUT_PAUSE_MAX */
/*
* To obtain a reasonable LRU approximation, the inactive queue
@@ -88,7 +88,7 @@
#ifndef VM_PAGE_INACTIVE_TARGET
#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 2 / 3)
-#endif VM_PAGE_INACTIVE_TARGET
+#endif /* VM_PAGE_INACTIVE_TARGET */
/*
* Once the pageout daemon starts running, it keeps going
@@ -97,7 +97,7 @@
#ifndef VM_PAGE_FREE_TARGET
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
-#endif VM_PAGE_FREE_TARGET
+#endif /* VM_PAGE_FREE_TARGET */
/*
* The pageout daemon always starts running once vm_page_free_count
@@ -106,7 +106,7 @@
#ifndef VM_PAGE_FREE_MIN
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
-#endif VM_PAGE_FREE_MIN
+#endif /* VM_PAGE_FREE_MIN */
/* When vm_page_external_count exceeds vm_page_external_limit,
* allocations of externally paged pages stops.
@@ -114,14 +114,14 @@
#ifndef VM_PAGE_EXTERNAL_LIMIT
#define VM_PAGE_EXTERNAL_LIMIT(free) ((free) / 2)
-#endif VM_PAGE_EXTERNAL_LIMIT
+#endif /* VM_PAGE_EXTERNAL_LIMIT */
/* Attempt to keep the number of externally paged pages less
* than vm_pages_external_target.
*/
#ifndef VM_PAGE_EXTERNAL_TARGET
#define VM_PAGE_EXTERNAL_TARGET(free) ((free) / 4)
-#endif VM_PAGE_EXTERNAL_TARGET
+#endif /* VM_PAGE_EXTERNAL_TARGET */
/*
* When vm_page_free_count falls below vm_page_free_reserved,
@@ -132,7 +132,7 @@
#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED 50
-#endif VM_PAGE_FREE_RESERVED
+#endif /* VM_PAGE_FREE_RESERVED */
/*
* When vm_page_free_count falls below vm_pageout_reserved_internal,
@@ -144,7 +144,7 @@
#ifndef VM_PAGEOUT_RESERVED_INTERNAL
#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 25)
-#endif VM_PAGEOUT_RESERVED_INTERNAL
+#endif /* VM_PAGEOUT_RESERVED_INTERNAL */
/*
* When vm_page_free_count falls below vm_pageout_reserved_really,
@@ -156,7 +156,7 @@
#ifndef VM_PAGEOUT_RESERVED_REALLY
#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 40)
-#endif VM_PAGEOUT_RESERVED_REALLY
+#endif /* VM_PAGEOUT_RESERVED_REALLY */
extern void vm_pageout_continue();
extern void vm_pageout_scan_continue();
@@ -196,7 +196,7 @@ unsigned int vm_pageout_inactive_cleaned_external = 0;
*/
extern kern_return_t memory_object_data_initialize();
extern kern_return_t memory_object_data_write();
-#endif NORMA_VM
+#endif /* NORMA_VM */
/*
* Routine: vm_pageout_setup
@@ -289,7 +289,7 @@ vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
vm_external_state_set(old_object->existence_info,
paging_offset,
VM_EXTERNAL_STATE_EXISTS);
-#endif MACH_PAGEMAP
+#endif /* MACH_PAGEMAP */
vm_object_unlock(old_object);
@@ -336,7 +336,7 @@ vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
vm_external_state_set(old_object->existence_info,
paging_offset,
VM_EXTERNAL_STATE_EXISTS);
-#endif MACH_PAGEMAP
+#endif /* MACH_PAGEMAP */
vm_object_unlock(old_object);
diff --git a/vm/vm_pageout.h b/vm/vm_pageout.h
index 5b47a5e..946c68b 100644
--- a/vm/vm_pageout.h
+++ b/vm/vm_pageout.h
@@ -1,25 +1,25 @@
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
@@ -43,4 +43,4 @@
extern vm_page_t vm_pageout_setup();
extern void vm_pageout_page();
-#endif _VM_VM_PAGEOUT_H_
+#endif /* _VM_VM_PAGEOUT_H_ */
diff --git a/vm/vm_user.h b/vm/vm_user.h
index f874010..85fc6a5 100644
--- a/vm/vm_user.h
+++ b/vm/vm_user.h
@@ -1,25 +1,25 @@
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
@@ -47,4 +47,4 @@ extern kern_return_t vm_write();
extern kern_return_t vm_copy();
extern kern_return_t vm_map();
-#endif _VM_VM_USER_H_
+#endif /* _VM_VM_USER_H_ */