summaryrefslogtreecommitdiff
path: root/vm
diff options
context:
space:
mode:
authorThomas Schwinge <tschwinge@gnu.org>2006-03-20 11:31:36 +0000
committerThomas Schwinge <tschwinge@gnu.org>2009-06-18 00:26:36 +0200
commitec9defc2912e86a7e682ec6e37aac102fa69d94d (patch)
treed8ef07ee8146566139505f5236b3fb0eb273ab2f /vm
parentb80e893edaa066af2a0c5c725c0c394e7c7c20e0 (diff)
2006-03-20 Thomas Schwinge <tschwinge@gnu.org>
* DEVELOPMENT: Document the NORMA removal. 2006-03-20 Leonardo Lopes Pereira <leonardolopespereira@gmail.com> Remove unused and unsupported code. Consult the file `DEVELOPMENT' for details. [patch #4982] * bogus/norma_device.h: Remove file. * bogus/norma_ether.h: Likewise. * bogus/norma_ipc.h: Likewise. * bogus/norma_task.h: Likewise. * bogus/norma_vm.h: Likewise. * include/mach/mach_norma.defs: Likewise. * include/mach/norma_task.defs: Likewise. * include/mach/norma_special_ports.h: Likewise. * Makefile.in (bogus-files): Remove `norma_device.h', `norma_ether.h', `norma_ipc.h', `norma_task.h' and `norma_vm.h'. (mach-headers): Remove `mach_norma.defs', `norma_task.defs' and `norma_special_ports.h'. * device/ds_routines.c: Don't include <norma_device.h> anymore and adopt all users of NORMA_DEVICE as if it were always defined to `0'. * device/net_io.c: Likewise for <norma_ether.h>, NORMA_ETHER. * kern/machine.c: Likewise. * ddb/db_command.c: Likewise for <norma_ipc.h>, NORMA_IPC. * ipc/ipc_init.c: Likewise. * ipc/ipc_kmsg.c: Likewise. * ipc/ipc_kmsg.h: Likewise. * ipc/ipc_mqueue.c: Likewise. * ipc/ipc_notify.c: Likewise. * ipc/ipc_port.c: Likewise. * ipc/ipc_port.h: Likewise. * ipc/ipc_space.c: Likewise. * ipc/ipc_space.h: Likewise. * ipc/mach_msg.c: Likewise. * kern/ast.c: Likewise. * kern/debug.c: Likewise. * kern/exception.c: Likewise. * kern/startup.c: Likewise. * vm/memory_object.c: Likewise. * vm/vm_map.c: Likewise. * kern/ipc_kobject.c: Likewise for <norma_task.h>, NORMA_TASK. * kern/task.c: Likewise. * kern/task.h: Likewise. * ddb/db_command.c: Likewise for <norma_vm.h>, NORMA_VM. * device/dev_pager.c: Likewise. * include/mach/mach_types.defs: Likewise. * include/mach/mach_types.h: Likewise. * include/mach/memory_object_default.defs: Likewise. * include/mach/memory_object.defs: Likewise. * ipc/ipc_kmsg.c: Likewise. * kern/ipc_kobject.c: Likewise. * kern/ipc_mig.c: Likewise. * kern/startup.c: Likewise. * vm/memory_object.c: Likewise. * vm/vm_object.c: Likewise. 
* vm/vm_object.h: Likewise. * vm/vm_pageout.c: Likewise.
Diffstat (limited to 'vm')
-rw-r--r--vm/memory_object.c12
-rw-r--r--vm/vm_map.c422
-rw-r--r--vm/vm_object.c64
-rw-r--r--vm/vm_object.h5
-rw-r--r--vm/vm_pageout.c9
5 files changed, 0 insertions, 512 deletions
diff --git a/vm/memory_object.c b/vm/memory_object.c
index c01b740..16e858a 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -63,11 +63,6 @@
#include <vm/vm_map.h> /* For vm_map_pageable */
#include <ipc/ipc_port.h>
-#include <norma_vm.h>
-#include <norma_ipc.h>
-#if NORMA_VM
-#include <norma/xmm_server_rename.h>
-#endif /* NORMA_VM */
#include <mach_pagemap.h>
#if MACH_PAGEMAP
#include <vm/vm_external.h>
@@ -86,7 +81,6 @@ decl_simple_lock_data(,memory_manager_default_lock)
* argument conversion. Explicit deallocation is necessary.
*/
-#if !NORMA_VM
/*
* If successful, destroys the map copy object.
*/
@@ -102,7 +96,6 @@ kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
data_cnt, lock_value, FALSE, IP_NULL,
0);
}
-#endif /* !NORMA_VM */
kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
@@ -896,7 +889,6 @@ MACRO_END
return (KERN_SUCCESS);
}
-#if !NORMA_VM
/*
* Old version of memory_object_lock_request.
*/
@@ -924,7 +916,6 @@ xxx_memory_object_lock_request(object, offset, size,
should_return, should_flush, prot,
reply_to, reply_to_type));
}
-#endif /* !NORMA_VM */
kern_return_t
memory_object_set_attributes_common(object, object_ready, may_cache,
@@ -989,8 +980,6 @@ memory_object_set_attributes_common(object, object_ready, may_cache,
return(KERN_SUCCESS);
}
-#if !NORMA_VM
-
/*
* XXX rpd claims that reply_to could be obviated in favor of a client
* XXX stub that made change_attributes an RPC. Need investigation.
@@ -1052,7 +1041,6 @@ kern_return_t memory_object_ready(object, may_cache, copy_strategy)
may_cache, copy_strategy,
FALSE);
}
-#endif /* !NORMA_VM */
kern_return_t memory_object_get_attributes(object, object_ready,
may_cache, copy_strategy)
diff --git a/vm/vm_map.c b/vm/vm_map.c
index c060196..fcd6265 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -34,8 +34,6 @@
* Virtual memory mapping module.
*/
-#include <norma_ipc.h>
-
#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
@@ -1657,67 +1655,6 @@ kern_return_t vm_map_delete(map, start, end)
entry = first_entry->vme_next;
else {
entry = first_entry;
-#if NORMA_IPC_xxx
- /*
- * XXX Had to disable this code because:
-
- _vm_map_delete(c0804b78,c2198000,c219a000,0,c219a000)+df
- [vm/vm_map.c:2007]
- _vm_map_remove(c0804b78,c2198000,c219a000,c0817834,
- c081786c)+42 [vm/vm_map.c:2094]
- _kmem_io_map_deallocate(c0804b78,c2198000,2000,c0817834,
- c081786c)+43 [vm/vm_kern.c:818]
- _device_write_dealloc(c081786c)+117 [device/ds_routines.c:814]
- _ds_write_done(c081786c,0)+2e [device/ds_routines.c:848]
- _io_done_thread_continue(c08150c0,c21d4e14,c21d4e30,c08150c0,
- c080c114)+14 [device/ds_routines.c:1350]
-
- */
- if (start > entry->vme_start
- && end == entry->vme_end
- && ! entry->wired_count /* XXX ??? */
- && ! entry->is_shared
- && ! entry->projected_on
- && ! entry->is_sub_map) {
- extern vm_object_t kernel_object;
- register vm_object_t object = entry->object.vm_object;
-
- /*
- * The region to be deleted lives at the end
- * of this entry, and thus all we have to do is
- * truncate the entry.
- *
- * This special case is necessary if we want
- * coalescing to do us any good.
- *
- * XXX Do we have to adjust object size?
- */
- if (object == kernel_object) {
- vm_object_lock(object);
- vm_object_page_remove(object,
- entry->offset + start,
- entry->offset +
- (end - start));
- vm_object_unlock(object);
- } else if (entry->is_shared) {
- vm_object_pmap_remove(object,
- entry->offset + start,
- entry->offset +
- (end - start));
- } else {
- pmap_remove(map->pmap, start, end);
- }
- object->size -= (end - start); /* XXX */
-
- entry->vme_end = start;
- map->size -= (end - start);
-
- if (map->wait_for_space) {
- thread_wakeup((event_t) map);
- }
- return KERN_SUCCESS;
- }
-#endif /* NORMA_IPC */
vm_map_clip_start(map, entry, start);
/*
@@ -2109,11 +2046,7 @@ kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
* support page lists LATER.
*/
-#if NORMA_IPC
- vm_map_convert_from_page_list(copy);
-#else
assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
-#endif
/*
* Currently this routine only handles page-aligned
@@ -4887,358 +4820,3 @@ void vm_map_copy_print(copy)
indent -=2;
}
#endif /* MACH_KDB */
-
-#if NORMA_IPC
-/*
- * This should one day be eliminated;
- * we should always construct the right flavor of copy object
- * the first time. Troublesome areas include vm_read, where vm_map_copyin
- * is called without knowing whom the copy object is for.
- * There are also situations where we do want a lazy data structure
- * even if we are sending to a remote port...
- */
-
-/*
- * Convert a copy to a page list. The copy argument is in/out
- * because we probably have to allocate a new vm_map_copy structure.
- * We take responsibility for discarding the old structure and
- * use a continuation to do so. Postponing this discard ensures
- * that the objects containing the pages we've marked busy will stick
- * around.
- */
-kern_return_t
-vm_map_convert_to_page_list(caller_copy)
- vm_map_copy_t *caller_copy;
-{
- vm_map_entry_t entry, next_entry;
- vm_offset_t va;
- vm_offset_t offset;
- vm_object_t object;
- kern_return_t result;
- vm_map_copy_t copy, new_copy;
- int i, num_pages = 0;
-
- zone_t entry_zone;
-
- copy = *caller_copy;
-
- /*
- * We may not have to do anything,
- * or may not be able to do anything.
- */
- if (copy == VM_MAP_COPY_NULL || copy->type == VM_MAP_COPY_PAGE_LIST) {
- return KERN_SUCCESS;
- }
- if (copy->type == VM_MAP_COPY_OBJECT) {
- return vm_map_convert_to_page_list_from_object(caller_copy);
- }
- if (copy->type != VM_MAP_COPY_ENTRY_LIST) {
- panic("vm_map_convert_to_page_list: copy type %d!\n",
- copy->type);
- }
-
- /*
- * Allocate the new copy. Set its continuation to
- * discard the old one.
- */
- new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
- new_copy->type = VM_MAP_COPY_PAGE_LIST;
- new_copy->cpy_npages = 0;
- new_copy->offset = copy->offset;
- new_copy->size = copy->size;
- new_copy->cpy_cont = vm_map_copy_discard_cont;
- new_copy->cpy_cont_args = (char *) copy;
-
- /*
- * Iterate over entries.
- */
- for (entry = vm_map_copy_first_entry(copy);
- entry != vm_map_copy_to_entry(copy);
- entry = entry->vme_next) {
-
- object = entry->object.vm_object;
- offset = entry->offset;
- /*
- * Iterate over pages.
- */
- for (va = entry->vme_start;
- va < entry->vme_end;
- va += PAGE_SIZE, offset += PAGE_SIZE) {
-
- vm_page_t m;
-
- if (new_copy->cpy_npages == VM_MAP_COPY_PAGE_LIST_MAX) {
- /*
- * What a mess. We need a continuation
- * to do the page list, but also one
- * to discard the old copy. The right
- * thing to do is probably to copy
- * out the old copy into the kernel
- * map (or some temporary task holding
- * map if we're paranoid about large
- * copies), and then copyin the page
- * list that we really wanted with
- * src_destroy. LATER.
- */
- panic("vm_map_convert_to_page_list: num\n");
- }
-
- /*
- * Try to find the page of data.
- */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- if (((m = vm_page_lookup(object, offset)) !=
- VM_PAGE_NULL) && !m->busy && !m->fictitious &&
- !m->absent && !m->error) {
-
- /*
- * This is the page. Mark it busy
- * and keep the paging reference on
- * the object whilst we do our thing.
- */
- m->busy = TRUE;
-
- /*
- * Also write-protect the page, so
- * that the map`s owner cannot change
- * the data. The busy bit will prevent
- * faults on the page from succeeding
- * until the copy is released; after
- * that, the page can be re-entered
- * as writable, since we didn`t alter
- * the map entry. This scheme is a
- * cheap copy-on-write.
- *
- * Don`t forget the protection and
- * the page_lock value!
- */
-
- pmap_page_protect(m->phys_addr,
- entry->protection
- & ~m->page_lock
- & ~VM_PROT_WRITE);
-
- }
- else {
- vm_prot_t result_prot;
- vm_page_t top_page;
- kern_return_t kr;
-
-retry:
- result_prot = VM_PROT_READ;
-
- kr = vm_fault_page(object, offset,
- VM_PROT_READ, FALSE, FALSE,
- &result_prot, &m, &top_page,
- FALSE, (void (*)()) 0);
- if (kr == VM_FAULT_MEMORY_SHORTAGE) {
- VM_PAGE_WAIT((void (*)()) 0);
- vm_object_lock(object);
- vm_object_paging_begin(object);
- goto retry;
- }
- if (kr != VM_FAULT_SUCCESS) {
- /* XXX what about data_error? */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- goto retry;
- }
- if (top_page != VM_PAGE_NULL) {
- vm_object_lock(object);
- VM_PAGE_FREE(top_page);
- vm_object_paging_end(object);
- vm_object_unlock(object);
- }
- }
- assert(m);
- m->busy = TRUE;
- new_copy->cpy_page_list[new_copy->cpy_npages++] = m;
- vm_object_unlock(object);
- }
- }
-
- *caller_copy = new_copy;
- return KERN_SUCCESS;
-}
-
-kern_return_t
-vm_map_convert_to_page_list_from_object(caller_copy)
- vm_map_copy_t *caller_copy;
-{
- vm_object_t object;
- vm_offset_t offset;
- vm_map_copy_t copy, new_copy;
-
- copy = *caller_copy;
- assert(copy->type == VM_MAP_COPY_OBJECT);
- object = copy->cpy_object;
- assert(object->size == round_page(object->size));
-
- /*
- * Allocate the new copy. Set its continuation to
- * discard the old one.
- */
- new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
- new_copy->type = VM_MAP_COPY_PAGE_LIST;
- new_copy->cpy_npages = 0;
- new_copy->offset = copy->offset;
- new_copy->size = copy->size;
- new_copy->cpy_cont = vm_map_copy_discard_cont;
- new_copy->cpy_cont_args = (char *) copy;
-
- /*
- * XXX memory_object_lock_request can probably bust this
- * XXX See continuation comment in previous routine for solution.
- */
- assert(object->size <= VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE);
-
- for (offset = 0; offset < object->size; offset += PAGE_SIZE) {
- vm_page_t m;
-
- /*
- * Try to find the page of data.
- */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- m = vm_page_lookup(object, offset);
- if ((m != VM_PAGE_NULL) && !m->busy && !m->fictitious &&
- !m->absent && !m->error) {
-
- /*
- * This is the page. Mark it busy
- * and keep the paging reference on
- * the object whilst we do our thing.
- */
- m->busy = TRUE;
- }
- else {
- vm_prot_t result_prot;
- vm_page_t top_page;
- kern_return_t kr;
-
-retry:
- result_prot = VM_PROT_READ;
-
- kr = vm_fault_page(object, offset,
- VM_PROT_READ, FALSE, FALSE,
- &result_prot, &m, &top_page,
- FALSE, (void (*)()) 0);
- if (kr == VM_FAULT_MEMORY_SHORTAGE) {
- VM_PAGE_WAIT((void (*)()) 0);
- vm_object_lock(object);
- vm_object_paging_begin(object);
- goto retry;
- }
- if (kr != VM_FAULT_SUCCESS) {
- /* XXX what about data_error? */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- goto retry;
- }
-
- if (top_page != VM_PAGE_NULL) {
- vm_object_lock(object);
- VM_PAGE_FREE(top_page);
- vm_object_paging_end(object);
- vm_object_unlock(object);
- }
- }
- assert(m);
- m->busy = TRUE;
- new_copy->cpy_page_list[new_copy->cpy_npages++] = m;
- vm_object_unlock(object);
- }
-
- *caller_copy = new_copy;
- return (KERN_SUCCESS);
-}
-
-kern_return_t
-vm_map_convert_from_page_list(copy)
- vm_map_copy_t copy;
-{
- vm_object_t object;
- int i;
- vm_map_entry_t new_entry;
- vm_page_t *page_list;
-
- /*
- * Check type of copy object.
- */
- if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
- return KERN_SUCCESS;
- }
- if (copy->type == VM_MAP_COPY_OBJECT) {
- printf("vm_map_convert_from_page_list: COPY_OBJECT?");
- return KERN_SUCCESS;
- }
- if (copy->type != VM_MAP_COPY_PAGE_LIST) {
- panic("vm_map_convert_from_page_list 0x%x %d",
- copy,
- copy->type);
- }
-
- /*
- * Make sure the pages are loose. This may be
- * a "Can't Happen", but just to be safe ...
- */
- page_list = &copy->cpy_page_list[0];
- if ((*page_list)->tabled)
- vm_map_copy_steal_pages(copy);
-
- /*
- * Create object, and stuff pages into it.
- */
- object = vm_object_allocate(copy->cpy_npages);
- for (i = 0; i < copy->cpy_npages; i++) {
- register vm_page_t m = *page_list++;
- vm_page_insert(m, object, i * PAGE_SIZE);
- m->busy = FALSE;
- m->dirty = TRUE;
- vm_page_activate(m);
- }
-
- /*
- * XXX If this page list contained a continuation, then
- * XXX we're screwed. The right thing to do is probably do
- * XXX the copyout, and then copyin the entry list we really
- * XXX wanted.
- */
- if (vm_map_copy_has_cont(copy))
- panic("convert_from_page_list: continuation");
-
- /*
- * Change type of copy object
- */
- vm_map_copy_first_entry(copy) =
- vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
- copy->type = VM_MAP_COPY_ENTRY_LIST;
- copy->cpy_hdr.nentries = 0;
- copy->cpy_hdr.entries_pageable = TRUE;
-
- /*
- * Allocate and initialize an entry for object
- */
- new_entry = vm_map_copy_entry_create(copy);
- new_entry->vme_start = trunc_page(copy->offset);
- new_entry->vme_end = round_page(copy->offset + copy->size);
- new_entry->object.vm_object = object;
- new_entry->offset = 0;
- new_entry->is_shared = FALSE;
- new_entry->is_sub_map = FALSE;
- new_entry->needs_copy = FALSE;
- new_entry->protection = VM_PROT_DEFAULT;
- new_entry->max_protection = VM_PROT_ALL;
- new_entry->inheritance = VM_INHERIT_DEFAULT;
- new_entry->wired_count = 0;
- new_entry->user_wired_count = 0;
- new_entry->projected_on = 0;
-
- /*
- * Insert entry into copy object, and return.
- */
- vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), new_entry);
- return(KERN_SUCCESS);
-}
-#endif /* NORMA_IPC */
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 8b57205..0b57827 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -33,13 +33,8 @@
* Virtual memory object module.
*/
-#include <norma_vm.h>
#include <mach_pagemap.h>
-#if NORMA_VM
-#include <norma/xmm_server_rename.h>
-#endif /* NORMA_VM */
-
#include <mach/memory_object.h>
#include "memory_object_default.h"
#include "memory_object_user.h"
@@ -231,13 +226,11 @@ vm_object_t vm_object_allocate(
register ipc_port_t port;
object = _vm_object_allocate(size);
-#if !NORMA_VM
port = ipc_port_alloc_kernel();
if (port == IP_NULL)
panic("vm_object_allocate");
object->pager_name = port;
ipc_kobject_set(port, (ipc_kobject_t) object, IKOT_PAGING_NAME);
-#endif /* !NORMA_VM */
return object;
}
@@ -325,7 +318,6 @@ void vm_object_bootstrap(void)
void vm_object_init(void)
{
-#if !NORMA_VM
/*
* Finish initializing the kernel object.
* The submap object doesn't need a name port.
@@ -335,7 +327,6 @@ void vm_object_init(void)
ipc_kobject_set(kernel_object->pager_name,
(ipc_kobject_t) kernel_object,
IKOT_PAGING_NAME);
-#endif /* !NORMA_VM */
}
/*
@@ -656,11 +647,7 @@ void vm_object_terminate(
object->pager_name);
} else if (object->pager_name != IP_NULL) {
/* consumes our right for pager_name */
-#if NORMA_VM
- ipc_port_release_send(object->pager_name);
-#else /* NORMA_VM */
ipc_port_dealloc_kernel(object->pager_name);
-#endif /* NORMA_VM */
}
#if MACH_PAGEMAP
@@ -865,11 +852,7 @@ kern_return_t memory_object_destroy(
old_name);
} else if (old_name != IP_NULL) {
/* consumes our right for name */
-#if NORMA_VM
- ipc_port_release_send(object->pager_name);
-#else /* NORMA_VM */
ipc_port_dealloc_kernel(object->pager_name);
-#endif /* NORMA_VM */
}
/*
@@ -1861,11 +1844,7 @@ vm_object_t vm_object_lookup(
if (IP_VALID(port)) {
ip_lock(port);
if (ip_active(port) &&
-#if NORMA_VM
- (ip_kotype(port) == IKOT_PAGER)) {
-#else /* NORMA_VM */
(ip_kotype(port) == IKOT_PAGING_REQUEST)) {
-#endif /* NORMA_VM */
vm_object_cache_lock();
object = (vm_object_t) port->ip_kobject;
vm_object_lock(object);
@@ -1976,16 +1955,10 @@ void vm_object_destroy(
*/
ipc_port_release_send(pager);
-#if !NORMA_VM
if (old_request != IP_NULL)
ipc_port_dealloc_kernel(old_request);
-#endif /* !NORMA_VM */
if (old_name != IP_NULL)
-#if NORMA_VM
- ipc_port_release_send(old_name);
-#else /* NORMA_VM */
ipc_port_dealloc_kernel(old_name);
-#endif /* NORMA_VM */
/*
* Restart pending page requests
@@ -2138,27 +2111,6 @@ restart:
object->pager_created = TRUE;
object->pager = pager;
-#if NORMA_VM
-
- /*
- * Let the xmm system know that we want to use the pager.
- *
- * Name port will be provided by the xmm system
- * when set_attributes_common is called.
- */
-
- object->internal = internal;
- object->pager_ready = internal;
- if (internal) {
- assert(object->temporary);
- } else {
- object->temporary = FALSE;
- }
- object->pager_name = IP_NULL;
-
- (void) xmm_memory_object_init(object);
-#else /* NORMA_VM */
-
/*
* Allocate request port.
*/
@@ -2207,7 +2159,6 @@ restart:
PAGE_SIZE);
}
-#endif /* NORMA_VM */
vm_object_lock(object);
object->pager_initialized = TRUE;
@@ -2359,7 +2310,6 @@ void vm_object_remove(
else if (ip_kotype(port) != IKOT_NONE)
panic("vm_object_remove: bad object port");
}
-#if !NORMA_VM
if ((port = object->pager_request) != IP_NULL) {
if (ip_kotype(port) == IKOT_PAGING_REQUEST)
ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
@@ -2372,7 +2322,6 @@ void vm_object_remove(
else if (ip_kotype(port) != IKOT_NONE)
panic("vm_object_remove: bad name port");
}
-#endif /* !NORMA_VM */
}
/*
@@ -2611,10 +2560,6 @@ void vm_object_collapse(
object->pager_created = backing_object->pager_created;
object->pager_request = backing_object->pager_request;
-#if NORMA_VM
- old_name_port = object->pager_name;
- object->pager_name = backing_object->pager_name;
-#else /* NORMA_VM */
if (object->pager_request != IP_NULL)
ipc_kobject_set(object->pager_request,
(ipc_kobject_t) object,
@@ -2628,7 +2573,6 @@ void vm_object_collapse(
ipc_kobject_set(object->pager_name,
(ipc_kobject_t) object,
IKOT_PAGING_NAME);
-#endif /* NORMA_VM */
vm_object_cache_unlock();
@@ -2677,11 +2621,7 @@ void vm_object_collapse(
vm_object_unlock(object);
if (old_name_port != IP_NULL)
-#if NORMA_VM
- ipc_port_release_send(old_name_port);
-#else /* NORMA_VM */
ipc_port_dealloc_kernel(old_name_port);
-#endif /* NORMA_VM */
zfree(vm_object_zone, (vm_offset_t) backing_object);
vm_object_lock(object);
@@ -2959,11 +2899,7 @@ ipc_port_t vm_object_name(
p = object->pager_name;
if (p != IP_NULL)
-#if NORMA_VM
- p = ipc_port_copy_send(p);
-#else /* NORMA_VM */
p = ipc_port_make_send(p);
-#endif /* NORMA_VM */
vm_object_unlock(object);
return p;
diff --git a/vm/vm_object.h b/vm/vm_object.h
index d710d99..940168d 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -35,7 +35,6 @@
#define _VM_VM_OBJECT_H_
#include <mach_pagemap.h>
-#include <norma_vm.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
@@ -53,11 +52,7 @@
#include <vm/vm_external.h>
#endif /* MACH_PAGEMAP */
-#if NORMA_VM
-typedef struct xmm_obj * pager_request_t;
-#else /* NORMA_VM */
typedef struct ipc_port * pager_request_t;
-#endif /* NORMA_VM */
#define PAGER_REQUEST_NULL ((pager_request_t) 0)
/*
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index f15e508..a8f1b3c 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -35,7 +35,6 @@
*/
#include <mach_pagemap.h>
-#include <norma_vm.h>
#include <mach/mach_types.h>
#include <mach/memory_object.h>
@@ -190,14 +189,6 @@ unsigned int vm_pageout_inactive_dirty = 0; /* debugging */
unsigned int vm_pageout_inactive_double = 0; /* debugging */
unsigned int vm_pageout_inactive_cleaned_external = 0;
-#if NORMA_VM
-/*
- * Define them here, since they won't be defined by memory_object_user.h.
- */
-extern kern_return_t memory_object_data_initialize();
-extern kern_return_t memory_object_data_write();
-#endif /* NORMA_VM */
-
/*
* Routine: vm_pageout_setup
* Purpose: