-rw-r--r-- | linux/dev/glue/kmem.c  |  10
-rw-r--r-- | linux/dev/init/main.c  | 125
2 files changed, 13 insertions, 122 deletions
diff --git a/linux/dev/glue/kmem.c b/linux/dev/glue/kmem.c
index ff052ff..25f6fd5 100644
--- a/linux/dev/glue/kmem.c
+++ b/linux/dev/glue/kmem.c
@@ -107,16 +107,6 @@ linux_kmem_init ()
       assert (pages_free[i].start);
       assert ((pages_free[i].start & 0xffff) == 0);
 
-      /* Sanity check: ensure pages are contiguous and within DMA limits. */
-      for (p = pages, j = 0; j < MEM_CHUNK_SIZE - PAGE_SIZE; j += PAGE_SIZE)
-        {
-          assert (p->phys_addr < MEM_DMA_LIMIT);
-          assert (p->phys_addr + PAGE_SIZE
-                  == ((vm_page_t) p->pageq.next)->phys_addr);
-
-          p = (vm_page_t) p->pageq.next;
-        }
-
       pages_free[i].end = pages_free[i].start + MEM_CHUNK_SIZE;
 
       /* Initialize free page bitmap. */
diff --git a/linux/dev/init/main.c b/linux/dev/init/main.c
index ecbd0b6..0e5ac86 100644
--- a/linux/dev/init/main.c
+++ b/linux/dev/init/main.c
@@ -182,140 +182,41 @@ linux_init (void)
 
 /*
  * Allocate contiguous memory with the given constraints.
- * This routine is horribly inefficient but it is presently
- * only used during initialization so it's not that bad.
  */
 void *
 alloc_contig_mem (unsigned size, unsigned limit,
                   unsigned mask, vm_page_t * pages)
 {
-  int i, j, bits_len;
-  unsigned *bits, len;
-  void *m;
-  vm_page_t p, page_list, tail, prev;
-  vm_offset_t addr, max_addr;
+  struct vm_page *p;
 
   if (size == 0)
     return (NULL);
-  size = round_page (size);
-  if ((size >> PAGE_SHIFT) > vm_page_free_count)
-    return (NULL);
-
-  /* Allocate bit array. */
-  max_addr = phys_last_addr;
-  if (max_addr > limit)
-    max_addr = limit;
-  bits_len = ((((max_addr >> PAGE_SHIFT) + NBPW - 1) / NBPW)
-              * sizeof (unsigned));
-  bits = (unsigned *) kalloc (bits_len);
-  if (!bits)
-    return (NULL);
-  memset (bits, 0, bits_len);
-
-  /*
-   * Walk the page free list and set a bit for every usable page.
-   */
-  simple_lock (&vm_page_queue_free_lock);
-  p = vm_page_queue_free;
-  while (p)
-    {
-      if (p->phys_addr < limit)
-        (bits[(p->phys_addr >> PAGE_SHIFT) / NBPW]
-         |= 1 << ((p->phys_addr >> PAGE_SHIFT) % NBPW));
-      p = (vm_page_t) p->pageq.next;
-    }
+  size = vm_page_atop(round_page(size));
 
-  /*
-   * Scan bit array for contiguous pages.
-   */
-  len = 0;
-  m = NULL;
-  for (i = 0; len < size && i < bits_len / sizeof (unsigned); i++)
-    for (j = 0; len < size && j < NBPW; j++)
-      if (!(bits[i] & (1 << j)))
-        {
-          len = 0;
-          m = NULL;
-        }
-      else
-        {
-          if (len == 0)
-            {
-              addr = ((vm_offset_t) (i * NBPW + j)
-                      << PAGE_SHIFT);
-              if ((addr & mask) == 0)
-                {
-                  len += PAGE_SIZE;
-                  m = (void *) addr;
-                }
-            }
-          else
-            len += PAGE_SIZE;
-        }
-
-  if (len != size)
-    {
-      simple_unlock (&vm_page_queue_free_lock);
-      kfree ((vm_offset_t) bits, bits_len);
-      return (NULL);
-    }
+  p = vm_page_alloc_p(iorder2(size), VM_PAGE_SEL_DMA, VM_PAGE_KERNEL);
+  if (! p)
+    return NULL;
 
-  /*
-   * Remove pages from free list
-   * and construct list to return to caller.
-   */
-  page_list = NULL;
-  for (len = 0; len < size; len += PAGE_SIZE, addr += PAGE_SIZE)
-    {
-      prev = NULL;
-      for (p = vm_page_queue_free; p; p = (vm_page_t) p->pageq.next)
-        {
-          if (p->phys_addr == addr)
-            break;
-          prev = p;
-        }
-      if (!p)
-        panic ("alloc_contig_mem: page not on free list");
-      if (prev)
-        prev->pageq.next = p->pageq.next;
-      else
-        vm_page_queue_free = (vm_page_t) p->pageq.next;
-      p->free = FALSE;
-      p->pageq.next = NULL;
-      if (!page_list)
-        page_list = tail = p;
-      else
-        {
-          tail->pageq.next = (queue_entry_t) p;
-          tail = p;
-        }
-      vm_page_free_count--;
-    }
+  if (p->phys_addr > limit || p->phys_addr & mask) {
+    vm_page_free_p(p, iorder2(size));
+    return NULL;
+  }
 
-  simple_unlock (&vm_page_queue_free_lock);
-  kfree ((vm_offset_t) bits, bits_len);
   if (pages)
-    *pages = page_list;
-  return phystokv(m);
+    *pages = p;
+  return phystokv(p->phys_addr);
 }
 
 /*
  * Free memory allocated by alloc_contig_mem.
  */
 void
-free_contig_mem (vm_page_t pages)
+free_contig_mem (vm_page_t page)
 {
   int i;
   vm_page_t p;
 
-  for (p = pages, i = 0; p->pageq.next; p = (vm_page_t) p->pageq.next, i++)
-    p->free = TRUE;
-  p->free = TRUE;
-  simple_lock (&vm_page_queue_free_lock);
-  vm_page_free_count += i + 1;
-  p->pageq.next = (queue_entry_t) vm_page_queue_free;
-  vm_page_queue_free = pages;
-  simple_unlock (&vm_page_queue_free_lock);
+  vm_page_free_p (page, page->order);
 }
 
 /* This is the number of bits of precision for the loops_per_second. Each
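
For orientation, a minimal caller-side sketch of the interface this patch keeps is shown below: alloc_contig_mem still returns a physically contiguous, DMA-capable buffer plus its vm_page descriptor, and free_contig_mem hands it back to the page allocator. This is an illustration, not code from the tree: the example_* functions, the static variables, and the 64 KiB / 16 MiB values are hypothetical; only the two prototypes from the patch and gnumach's vm/vm_page.h header (for the vm_page_t typedef) are assumed.

/* Illustrative sketch only (not part of the patch): grab and release a
   DMA-able buffer through the helpers changed above.  The size and limit
   are example values; mask 0 asks for no alignment beyond page alignment.  */
#include <vm/vm_page.h>                /* vm_page_t */

extern void *alloc_contig_mem (unsigned size, unsigned limit,
                               unsigned mask, vm_page_t *pages);
extern void free_contig_mem (vm_page_t page);

static vm_page_t dma_pages;
static void *dma_buf;

static int
example_grab_dma_buffer (void)
{
  /* 64 KiB of physically contiguous memory below the 16 MiB ISA DMA limit.  */
  dma_buf = alloc_contig_mem (64 * 1024, 16 * 1024 * 1024, 0, &dma_pages);
  return dma_buf ? 0 : -1;
}

static void
example_release_dma_buffer (void)
{
  if (dma_buf)
    free_contig_mem (dma_pages);
}

Note that after this change the limit and mask constraints are checked once against the block returned by vm_page_alloc_p instead of being satisfied by scanning the free list, so a caller with strict placement requirements may now get NULL where the old scan would have kept searching.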