author		Thomas Schwinge <tschwinge@gnu.org>	2006-03-20 11:31:36 +0000
committer	Thomas Schwinge <tschwinge@gnu.org>	2009-06-18 00:26:36 +0200
commit		ec9defc2912e86a7e682ec6e37aac102fa69d94d (patch)
tree		d8ef07ee8146566139505f5236b3fb0eb273ab2f /vm/vm_map.c
parent		b80e893edaa066af2a0c5c725c0c394e7c7c20e0 (diff)
2006-03-20  Thomas Schwinge  <tschwinge@gnu.org>

	* DEVELOPMENT: Document the NORMA removal.

2006-03-20  Leonardo Lopes Pereira  <leonardolopespereira@gmail.com>

	Remove unused and unsupported code.  Consult the file
	`DEVELOPMENT' for details.
	[patch #4982]
	* bogus/norma_device.h: Remove file.
	* bogus/norma_ether.h: Likewise.
	* bogus/norma_ipc.h: Likewise.
	* bogus/norma_task.h: Likewise.
	* bogus/norma_vm.h: Likewise.
	* include/mach/mach_norma.defs: Likewise.
	* include/mach/norma_task.defs: Likewise.
	* include/mach/norma_special_ports.h: Likewise.
	* Makefile.in (bogus-files): Remove `norma_device.h',
	`norma_ether.h', `norma_ipc.h', `norma_task.h' and
	`norma_vm.h'.
	(mach-headers): Remove `mach_norma.defs', `norma_task.defs'
	and `norma_special_ports.h'.
	* device/ds_routines.c: Don't include <norma_device.h> anymore
	and adopt all users of NORMA_DEVICE as if it were always
	defined to `0'.
	* device/net_io.c: Likewise for <norma_ether.h>, NORMA_ETHER.
	* kern/machine.c: Likewise.
	* ddb/db_command.c: Likewise for <norma_ipc.h>, NORMA_IPC.
	* ipc/ipc_init.c: Likewise.
	* ipc/ipc_kmsg.c: Likewise.
	* ipc/ipc_kmsg.h: Likewise.
	* ipc/ipc_mqueue.c: Likewise.
	* ipc/ipc_notify.c: Likewise.
	* ipc/ipc_port.c: Likewise.
	* ipc/ipc_port.h: Likewise.
	* ipc/ipc_space.c: Likewise.
	* ipc/ipc_space.h: Likewise.
	* ipc/mach_msg.c: Likewise.
	* kern/ast.c: Likewise.
	* kern/debug.c: Likewise.
	* kern/exception.c: Likewise.
	* kern/startup.c: Likewise.
	* vm/memory_object.c: Likewise.
	* vm/vm_map.c: Likewise.
	* kern/ipc_kobject.c: Likewise for <norma_task.h>, NORMA_TASK.
	* kern/task.c: Likewise.
	* kern/task.h: Likewise.
	* ddb/db_command.c: Likewise for <norma_vm.h>, NORMA_VM.
	* device/dev_pager.c: Likewise.
	* include/mach/mach_types.defs: Likewise.
	* include/mach/mach_types.h: Likewise.
	* include/mach/memory_object_default.defs: Likewise.
	* include/mach/memory_object.defs: Likewise.
	* ipc/ipc_kmsg.c: Likewise.
	* kern/ipc_kobject.c: Likewise.
	* kern/ipc_mig.c: Likewise.
	* kern/startup.c: Likewise.
	* vm/memory_object.c: Likewise.
	* vm/vm_object.c: Likewise.
	* vm/vm_object.h: Likewise.
	* vm/vm_pageout.c: Likewise.
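To make the mechanical part of the change concrete, here is a minimal before/after sketch of what adopting a user of NORMA_IPC "as if it were always defined to `0'" means in practice. It is distilled from the vm_map_copy_overwrite hunk shown below, not a separate change; the juxtaposition is illustrative only.

	/* Before: vm/vm_map.c included the configuration header and
	 * dispatched on the NORMA_IPC macro at compile time. */
	#include <norma_ipc.h>

	#if NORMA_IPC
		vm_map_convert_from_page_list(copy);
	#else
		assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
	#endif

	/* After: with NORMA_IPC treated as always 0, the #include is
	 * dropped and only the #else branch survives. */
	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);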
Diffstat (limited to 'vm/vm_map.c')
-rw-r--r--	vm/vm_map.c	422
1 file changed, 0 insertions, 422 deletions
diff --git a/vm/vm_map.c b/vm/vm_map.c
index c060196..fcd6265 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -34,8 +34,6 @@
* Virtual memory mapping module.
*/
-#include <norma_ipc.h>
-
#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
@@ -1657,67 +1655,6 @@ kern_return_t vm_map_delete(map, start, end)
entry = first_entry->vme_next;
else {
entry = first_entry;
-#if NORMA_IPC_xxx
- /*
- * XXX Had to disable this code because:
-
- _vm_map_delete(c0804b78,c2198000,c219a000,0,c219a000)+df
- [vm/vm_map.c:2007]
- _vm_map_remove(c0804b78,c2198000,c219a000,c0817834,
- c081786c)+42 [vm/vm_map.c:2094]
- _kmem_io_map_deallocate(c0804b78,c2198000,2000,c0817834,
- c081786c)+43 [vm/vm_kern.c:818]
- _device_write_dealloc(c081786c)+117 [device/ds_routines.c:814]
- _ds_write_done(c081786c,0)+2e [device/ds_routines.c:848]
- _io_done_thread_continue(c08150c0,c21d4e14,c21d4e30,c08150c0,
- c080c114)+14 [device/ds_routines.c:1350]
-
- */
- if (start > entry->vme_start
- && end == entry->vme_end
- && ! entry->wired_count /* XXX ??? */
- && ! entry->is_shared
- && ! entry->projected_on
- && ! entry->is_sub_map) {
- extern vm_object_t kernel_object;
- register vm_object_t object = entry->object.vm_object;
-
- /*
- * The region to be deleted lives at the end
- * of this entry, and thus all we have to do is
- * truncate the entry.
- *
- * This special case is necessary if we want
- * coalescing to do us any good.
- *
- * XXX Do we have to adjust object size?
- */
- if (object == kernel_object) {
- vm_object_lock(object);
- vm_object_page_remove(object,
- entry->offset + start,
- entry->offset +
- (end - start));
- vm_object_unlock(object);
- } else if (entry->is_shared) {
- vm_object_pmap_remove(object,
- entry->offset + start,
- entry->offset +
- (end - start));
- } else {
- pmap_remove(map->pmap, start, end);
- }
- object->size -= (end - start); /* XXX */
-
- entry->vme_end = start;
- map->size -= (end - start);
-
- if (map->wait_for_space) {
- thread_wakeup((event_t) map);
- }
- return KERN_SUCCESS;
- }
-#endif /* NORMA_IPC */
vm_map_clip_start(map, entry, start);
/*
@@ -2109,11 +2046,7 @@ kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
* support page lists LATER.
*/
-#if NORMA_IPC
- vm_map_convert_from_page_list(copy);
-#else
assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
-#endif
/*
* Currently this routine only handles page-aligned
@@ -4887,358 +4820,3 @@ void vm_map_copy_print(copy)
indent -=2;
}
#endif /* MACH_KDB */
-
-#if NORMA_IPC
-/*
- * This should one day be eliminated;
- * we should always construct the right flavor of copy object
- * the first time. Troublesome areas include vm_read, where vm_map_copyin
- * is called without knowing whom the copy object is for.
- * There are also situations where we do want a lazy data structure
- * even if we are sending to a remote port...
- */
-
-/*
- * Convert a copy to a page list. The copy argument is in/out
- * because we probably have to allocate a new vm_map_copy structure.
- * We take responsibility for discarding the old structure and
- * use a continuation to do so. Postponing this discard ensures
- * that the objects containing the pages we've marked busy will stick
- * around.
- */
-kern_return_t
-vm_map_convert_to_page_list(caller_copy)
- vm_map_copy_t *caller_copy;
-{
- vm_map_entry_t entry, next_entry;
- vm_offset_t va;
- vm_offset_t offset;
- vm_object_t object;
- kern_return_t result;
- vm_map_copy_t copy, new_copy;
- int i, num_pages = 0;
-
- zone_t entry_zone;
-
- copy = *caller_copy;
-
- /*
- * We may not have to do anything,
- * or may not be able to do anything.
- */
- if (copy == VM_MAP_COPY_NULL || copy->type == VM_MAP_COPY_PAGE_LIST) {
- return KERN_SUCCESS;
- }
- if (copy->type == VM_MAP_COPY_OBJECT) {
- return vm_map_convert_to_page_list_from_object(caller_copy);
- }
- if (copy->type != VM_MAP_COPY_ENTRY_LIST) {
- panic("vm_map_convert_to_page_list: copy type %d!\n",
- copy->type);
- }
-
- /*
- * Allocate the new copy. Set its continuation to
- * discard the old one.
- */
- new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
- new_copy->type = VM_MAP_COPY_PAGE_LIST;
- new_copy->cpy_npages = 0;
- new_copy->offset = copy->offset;
- new_copy->size = copy->size;
- new_copy->cpy_cont = vm_map_copy_discard_cont;
- new_copy->cpy_cont_args = (char *) copy;
-
- /*
- * Iterate over entries.
- */
- for (entry = vm_map_copy_first_entry(copy);
- entry != vm_map_copy_to_entry(copy);
- entry = entry->vme_next) {
-
- object = entry->object.vm_object;
- offset = entry->offset;
- /*
- * Iterate over pages.
- */
- for (va = entry->vme_start;
- va < entry->vme_end;
- va += PAGE_SIZE, offset += PAGE_SIZE) {
-
- vm_page_t m;
-
- if (new_copy->cpy_npages == VM_MAP_COPY_PAGE_LIST_MAX) {
- /*
- * What a mess. We need a continuation
- * to do the page list, but also one
- * to discard the old copy. The right
- * thing to do is probably to copy
- * out the old copy into the kernel
- * map (or some temporary task holding
- * map if we're paranoid about large
- * copies), and then copyin the page
- * list that we really wanted with
- * src_destroy. LATER.
- */
- panic("vm_map_convert_to_page_list: num\n");
- }
-
- /*
- * Try to find the page of data.
- */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- if (((m = vm_page_lookup(object, offset)) !=
- VM_PAGE_NULL) && !m->busy && !m->fictitious &&
- !m->absent && !m->error) {
-
- /*
- * This is the page. Mark it busy
- * and keep the paging reference on
- * the object whilst we do our thing.
- */
- m->busy = TRUE;
-
- /*
- * Also write-protect the page, so
- * that the map`s owner cannot change
- * the data. The busy bit will prevent
- * faults on the page from succeeding
- * until the copy is released; after
- * that, the page can be re-entered
- * as writable, since we didn`t alter
- * the map entry. This scheme is a
- * cheap copy-on-write.
- *
- * Don`t forget the protection and
- * the page_lock value!
- */
-
- pmap_page_protect(m->phys_addr,
- entry->protection
- & ~m->page_lock
- & ~VM_PROT_WRITE);
-
- }
- else {
- vm_prot_t result_prot;
- vm_page_t top_page;
- kern_return_t kr;
-
-retry:
- result_prot = VM_PROT_READ;
-
- kr = vm_fault_page(object, offset,
- VM_PROT_READ, FALSE, FALSE,
- &result_prot, &m, &top_page,
- FALSE, (void (*)()) 0);
- if (kr == VM_FAULT_MEMORY_SHORTAGE) {
- VM_PAGE_WAIT((void (*)()) 0);
- vm_object_lock(object);
- vm_object_paging_begin(object);
- goto retry;
- }
- if (kr != VM_FAULT_SUCCESS) {
- /* XXX what about data_error? */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- goto retry;
- }
- if (top_page != VM_PAGE_NULL) {
- vm_object_lock(object);
- VM_PAGE_FREE(top_page);
- vm_object_paging_end(object);
- vm_object_unlock(object);
- }
- }
- assert(m);
- m->busy = TRUE;
- new_copy->cpy_page_list[new_copy->cpy_npages++] = m;
- vm_object_unlock(object);
- }
- }
-
- *caller_copy = new_copy;
- return KERN_SUCCESS;
-}
-
-kern_return_t
-vm_map_convert_to_page_list_from_object(caller_copy)
- vm_map_copy_t *caller_copy;
-{
- vm_object_t object;
- vm_offset_t offset;
- vm_map_copy_t copy, new_copy;
-
- copy = *caller_copy;
- assert(copy->type == VM_MAP_COPY_OBJECT);
- object = copy->cpy_object;
- assert(object->size == round_page(object->size));
-
- /*
- * Allocate the new copy. Set its continuation to
- * discard the old one.
- */
- new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
- new_copy->type = VM_MAP_COPY_PAGE_LIST;
- new_copy->cpy_npages = 0;
- new_copy->offset = copy->offset;
- new_copy->size = copy->size;
- new_copy->cpy_cont = vm_map_copy_discard_cont;
- new_copy->cpy_cont_args = (char *) copy;
-
- /*
- * XXX memory_object_lock_request can probably bust this
- * XXX See continuation comment in previous routine for solution.
- */
- assert(object->size <= VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE);
-
- for (offset = 0; offset < object->size; offset += PAGE_SIZE) {
- vm_page_t m;
-
- /*
- * Try to find the page of data.
- */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- m = vm_page_lookup(object, offset);
- if ((m != VM_PAGE_NULL) && !m->busy && !m->fictitious &&
- !m->absent && !m->error) {
-
- /*
- * This is the page. Mark it busy
- * and keep the paging reference on
- * the object whilst we do our thing.
- */
- m->busy = TRUE;
- }
- else {
- vm_prot_t result_prot;
- vm_page_t top_page;
- kern_return_t kr;
-
-retry:
- result_prot = VM_PROT_READ;
-
- kr = vm_fault_page(object, offset,
- VM_PROT_READ, FALSE, FALSE,
- &result_prot, &m, &top_page,
- FALSE, (void (*)()) 0);
- if (kr == VM_FAULT_MEMORY_SHORTAGE) {
- VM_PAGE_WAIT((void (*)()) 0);
- vm_object_lock(object);
- vm_object_paging_begin(object);
- goto retry;
- }
- if (kr != VM_FAULT_SUCCESS) {
- /* XXX what about data_error? */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- goto retry;
- }
-
- if (top_page != VM_PAGE_NULL) {
- vm_object_lock(object);
- VM_PAGE_FREE(top_page);
- vm_object_paging_end(object);
- vm_object_unlock(object);
- }
- }
- assert(m);
- m->busy = TRUE;
- new_copy->cpy_page_list[new_copy->cpy_npages++] = m;
- vm_object_unlock(object);
- }
-
- *caller_copy = new_copy;
- return (KERN_SUCCESS);
-}
-
-kern_return_t
-vm_map_convert_from_page_list(copy)
- vm_map_copy_t copy;
-{
- vm_object_t object;
- int i;
- vm_map_entry_t new_entry;
- vm_page_t *page_list;
-
- /*
- * Check type of copy object.
- */
- if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
- return KERN_SUCCESS;
- }
- if (copy->type == VM_MAP_COPY_OBJECT) {
- printf("vm_map_convert_from_page_list: COPY_OBJECT?");
- return KERN_SUCCESS;
- }
- if (copy->type != VM_MAP_COPY_PAGE_LIST) {
- panic("vm_map_convert_from_page_list 0x%x %d",
- copy,
- copy->type);
- }
-
- /*
- * Make sure the pages are loose. This may be
- * a "Can't Happen", but just to be safe ...
- */
- page_list = &copy->cpy_page_list[0];
- if ((*page_list)->tabled)
- vm_map_copy_steal_pages(copy);
-
- /*
- * Create object, and stuff pages into it.
- */
- object = vm_object_allocate(copy->cpy_npages);
- for (i = 0; i < copy->cpy_npages; i++) {
- register vm_page_t m = *page_list++;
- vm_page_insert(m, object, i * PAGE_SIZE);
- m->busy = FALSE;
- m->dirty = TRUE;
- vm_page_activate(m);
- }
-
- /*
- * XXX If this page list contained a continuation, then
- * XXX we're screwed. The right thing to do is probably do
- * XXX the copyout, and then copyin the entry list we really
- * XXX wanted.
- */
- if (vm_map_copy_has_cont(copy))
- panic("convert_from_page_list: continuation");
-
- /*
- * Change type of copy object
- */
- vm_map_copy_first_entry(copy) =
- vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
- copy->type = VM_MAP_COPY_ENTRY_LIST;
- copy->cpy_hdr.nentries = 0;
- copy->cpy_hdr.entries_pageable = TRUE;
-
- /*
- * Allocate and initialize an entry for object
- */
- new_entry = vm_map_copy_entry_create(copy);
- new_entry->vme_start = trunc_page(copy->offset);
- new_entry->vme_end = round_page(copy->offset + copy->size);
- new_entry->object.vm_object = object;
- new_entry->offset = 0;
- new_entry->is_shared = FALSE;
- new_entry->is_sub_map = FALSE;
- new_entry->needs_copy = FALSE;
- new_entry->protection = VM_PROT_DEFAULT;
- new_entry->max_protection = VM_PROT_ALL;
- new_entry->inheritance = VM_INHERIT_DEFAULT;
- new_entry->wired_count = 0;
- new_entry->user_wired_count = 0;
- new_entry->projected_on = 0;
-
- /*
- * Insert entry into copy object, and return.
- */
- vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), new_entry);
- return(KERN_SUCCESS);
-}
-#endif /* NORMA_IPC */