Diffstat (limited to 'vm')
-rw-r--r--  vm/vm_user.c | 106
1 file changed, 106 insertions, 0 deletions
diff --git a/vm/vm_user.c b/vm/vm_user.c
index e65f6d5..788f43a 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -449,3 +449,109 @@ kern_return_t vm_wire(port, map, start, size, access)
round_page(start+size),
access);
}
+
+kern_return_t experimental_vm_allocate_contiguous(host_priv, map, result_vaddr, result_paddr, size)
+ host_t host_priv;
+ vm_map_t map;
+ vm_address_t *result_vaddr;
+ vm_address_t *result_paddr;
+ vm_size_t size;
+{
+ unsigned int npages;
+ unsigned int i;
+ unsigned int order;
+ vm_page_t pages;
+ vm_object_t object;
+ vm_map_entry_t entry;
+ kern_return_t kr;
+ vm_address_t vaddr;
+ vm_offset_t offset = 0;
+
+ if (host_priv == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ /*
+ * XXX The page allocator returns blocks with a power-of-two size.
+ * The requested size may not be a power-of-two, causing the pages
+ * at the end of a block to be unused. In order to keep track of
+ * those pages, they must all be inserted in the VM object created
+ * by this function.
+ */
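+	/* Example, assuming 4 KB pages: a 12 KB request yields order 2,
+	 * i.e. 16 KB, and the trailing page sits unused in the object. */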
+ order = vm_page_order(size);
+ size = (1 << (order + PAGE_SHIFT));
+
+ /* We allocate the contiguous physical pages for the buffer. */
+
+ npages = size / PAGE_SIZE;
+ pages = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);
+ if (pages == NULL)
+ {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+	/* Allocate the VM object and find a virtual address
+	 * range for the DMA buffer. */
+
+ object = vm_object_allocate(size);
+ vm_map_lock(map);
+	/* TODO: user_wired_count might need to be set to 1. */
+ kr = vm_map_find_entry(map, &vaddr, size, (vm_offset_t) 0,
+ VM_OBJECT_NULL, &entry);
+ if (kr != KERN_SUCCESS)
+ {
+ vm_map_unlock(map);
+ vm_object_deallocate(object);
+ vm_page_free_contig(pages, size);
+ return kr;
+ }
+
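+	/* The map is still locked, so no other thread can fault on the
+	 * entry before its object is set. */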
+ entry->object.vm_object = object;
+ entry->offset = 0;
+
+	/* The entry is fully set up, so the map can be unlocked. */
+ vm_map_unlock(map);
+
+	/* The physical pages are allocated; now map and wire them. */
+
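+	/* Tell the pmap module that these mappings must stay resident:
+	 * the buffer is wired and may be used for DMA. */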
+ pmap_pageable (map->pmap, vaddr, vaddr + size, FALSE);
+
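+	/* The block is physically contiguous, so the first page's
+	 * physical address is the base of the whole buffer. */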
+ *result_vaddr = vaddr;
+ *result_paddr = pages->phys_addr;
+
+ for (i = 0; i < npages; i++)
+ {
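+		/* Insert the page into the object and wire it so the
+		 * pageout daemon leaves it in place. */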
+ vm_object_lock(object);
+ vm_page_lock_queues();
+ vm_page_insert(&pages[i], object, offset);
+ vm_page_wire(&pages[i]);
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+
+		/* Enter the page in the task's pmap as a wired mapping. */
+ PMAP_ENTER(map->pmap, vaddr, &pages[i], VM_PROT_DEFAULT, TRUE);
+
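+		/* Clear the page's busy bit and wake up any threads
+		 * waiting on it. */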
+ vm_object_lock(object);
+ PAGE_WAKEUP_DONE(&pages[i]);
+ vm_object_unlock(object);
+
+ vaddr += PAGE_SIZE;
+ offset += PAGE_SIZE;
+ }
+
+ return KERN_SUCCESS;
+}
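
For context, a minimal caller sketch follows. It is hypothetical and not part of this change: it assumes a Hurd user-space program that obtains the privileged host port via get_privileged_ports(), assumes the MIG-generated user stub for the new routine is linked in, and the helper name alloc_dma_buffer is made up for illustration.

	#include <mach.h>
	#include <hurd.h>

	/* Allocate a small physically contiguous DMA buffer and return
	   its virtual and physical addresses. */
	kern_return_t
	alloc_dma_buffer (vm_address_t *va, vm_address_t *pa)
	{
		mach_port_t host_priv, device_master;
		error_t err;

		err = get_privileged_ports (&host_priv, &device_master);
		if (err)
			return err;

		/* Ask for 16 KB; the kernel rounds the size up to a
		   power-of-two number of pages, so the mapping may be
		   larger than requested. */
		return experimental_vm_allocate_contiguous (host_priv,
							    mach_task_self (),
							    va, pa,
							    16 * 1024);
	}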